# -*- coding: utf-8 -*-
"""
The :mod:`parsimony.functions.penalties` module contains the penalties used to
constrain the loss functions. These represent mathematical functions and
should thus have properties used by the corresponding algorithms. These
properties are defined in :mod:`parsimony.functions.properties`.
Penalties should be stateless. Penalties may be shared and copied and should
therefore not hold anything that cannot be recomputed the next time it is
called.
Created on Mon Apr 22 10:54:29 2013
Copyright (c) 2013-2017, CEA/DSV/I2BM/Neurospin. All rights reserved.
@author: <NAME>, <NAME>, <NAME> and
<NAME>
@email: <EMAIL>, <EMAIL>
@license: BSD 3-clause.
"""
import numpy as np
import scipy.optimize as optimize
import scipy.sparse as sparse
try:
from . import properties # Only works when imported as a package.
except (ValueError, SystemError):
import parsimony.functions.properties as properties # Run as a script.
import parsimony.utils.maths as maths
import parsimony.utils.consts as consts
import parsimony.utils.linalgs as linalgs
__all__ = ["ZeroFunction", "L1", "L0", "LInf", "L2", "L2Squared",
"L1L2Squared", "GraphNet",
"QuadraticConstraint", "RGCCAConstraint", "RidgeSquaredError",
"LinearConstraint",
"LinearVariableConstraint",
"SufficientDescentCondition",
"KernelL2Squared"]
class ZeroFunction(properties.AtomicFunction,
properties.Gradient,
properties.Penalty,
properties.Constraint,
properties.ProximalOperator,
properties.ProjectionOperator):
def __init__(self, l=1.0, c=0.0, penalty_start=0):
"""
Parameters
----------
l : float
A non-negative float. The Lagrange multiplier, or regularisation
constant, of the function.
c : float
The limit of the constraint. Since the function is identically zero, it
is feasible whenever c >= 0. The default value is c=0, i.e. the default
is a regularisation formulation.
penalty_start : int
A non-negative integer. The number of columns, variables etc., to
be exempt from penalisation. Equivalently, the first index to be
penalised. Default is 0, all columns are included.
"""
self.l = max(0.0, float(l))
self.c = float(c)
if self.c < 0.0:
raise ValueError("A negative constraint parameter does not make "
"sense, since the function is always zero.")
self.penalty_start = max(0, int(penalty_start))
self.reset()
def reset(self):
self._zero = None
def f(self, x):
"""Function value.
"""
return 0.0
def grad(self, x):
"""Gradient of the function.
From the interface "Gradient".
"""
if self._zero is None:
self._zero = np.zeros(x.shape)
return self._zero
def prox(self, x, factor=1.0, **kwargs):
"""The corresponding proximal operator.
From the interface "ProximalOperator".
"""
return x
def proj(self, x, **kwargs):
"""The corresponding projection operator.
From the interface "ProjectionOperator".
"""
return x
def feasible(self, x):
"""Feasibility of the constraint.
From the interface "Constraint".
"""
return self.c >= 0.0
class L1(properties.AtomicFunction,
properties.Penalty,
properties.Constraint,
properties.ProximalOperator,
properties.ProjectionOperator,
properties.SubGradient):
"""The L1 function in a penalty formulation has the form
f(\beta) = l * (||\beta||_1 - c),
where ||\beta||_1 is the L1 loss function. The constrained version has the
form
||\beta||_1 <= c.
Parameters
----------
l : Non-negative float. The Lagrange multiplier, or regularisation
constant, of the function.
c : Float. The limit of the constraint. The function is feasible if
||\beta||_1 <= c. The default value is c=0, i.e. the default is a
regularisation formulation.
penalty_start : Non-negative integer. The number of columns, variables
etc., to be exempt from penalisation. Equivalently, the first index
to be penalised. Default is 0, all columns are included.
"""
def __init__(self, l=1.0, c=0.0, penalty_start=0):
self.l = float(l)
self.c = float(c)
self.penalty_start = max(0, int(penalty_start))
def f(self, beta):
"""Function value.
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
return self.l * (maths.norm1(beta_) - self.c)
def subgrad(self, beta, clever=True, random_state=None, **kwargs):
if random_state is None:
random_state = np.random.RandomState()
izero = np.abs(beta) < 10.0 * consts.FLOAT_EPSILON
inonzero = np.logical_not(izero)
grad = np.zeros(beta.shape)
grad[inonzero] = np.sign(beta[inonzero])
if clever:
# The "clever" part here is that since we are already at the
# minimum of the penalty, we have no reason to move away from here.
# Hence, the subgradient is zero at this point.
grad[izero] = np.zeros(np.sum(izero))
else:
grad[izero] = random_state.uniform(-1, 1, np.sum(izero))
return self.l * grad
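# Illustrative sketch (hypothetical values): for beta = [[2.0], [0.0], [-3.0]]
# and l = 1.0, the "clever" subgradient above is [[1.0], [0.0], [-1.0]], i.e.
# the sign of each non-zero entry and exactly zero at the zero entry.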
def prox(self, beta, factor=1.0, **kwargs):
"""The corresponding proximal operator.
From the interface "ProximalOperator".
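Example
-------
An illustrative sketch (the small vector below is hypothetical); prox
soft-thresholds each penalised entry by l.
>>> import numpy as np
>>> from parsimony.functions.penalties import L1
>>> l1 = L1(l=0.5)
>>> beta = np.array([[1.0], [-0.25], [0.75]])
>>> np.linalg.norm(l1.prox(beta) - np.array([[0.5], [0.0], [0.25]])) < 5e-16
True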
"""
l = self.l * factor
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
prox = (np.abs(beta_) > l) * (beta_ - l * np.sign(beta_ - l))
if self.penalty_start > 0:
prox = np.vstack((beta[:self.penalty_start, :], prox))
return prox
def proj(self, beta, **kwargs):
"""The corresponding projection operator.
From the interface "ProjectionOperator".
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
p = beta_.shape[0]
abs_beta = np.absolute(beta_)
norm1 = np.sum(abs_beta)
if norm1 <= self.c: # Feasible?
return beta
a = np.flipud(np.sort(abs_beta, axis=0)).ravel()
suma = np.cumsum(a)
phi = np.zeros((p + 1,))
np.multiply(a, np.arange(-1, -p - 1, -1), phi[:p])
phi[:p] += (suma - self.c)
phi[p] = suma[p - 1] - self.c
# TODO: BUG: i may be equal to p => IndexError: list index out of range
i = np.searchsorted(phi, 0.0) # First positive (or zero).
if phi[i] < 0.0:
# TODO: This should not be able to happen! Do we know it doesn't?
return self.__proj_old(beta)
i -= 1 # The last negative phi before positive (or zero).
if phi[i] >= 0.0:
# TODO: This should not be able to happen! Do we know it doesn't?
return self.__proj_old(beta)
l = a[i] + phi[i] / float(i + 1) # Find the Lagrange multiplier.
# The correction by eps is to nudge the L1 norm just below self.c.
eps = consts.FLOAT_EPSILON
l += eps
return (np.abs(beta_) > l) * (beta_ - l * np.sign(beta_ - l))
def __proj_old(self, beta):
"""The corresponding projection operator.
From the interface "ProjectionOperator".
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
abs_beta = np.absolute(beta_)
norm1 = np.sum(abs_beta)
if norm1 <= self.c: # Feasible?
return beta
from parsimony.algorithms.utils import Bisection
bisection = Bisection(force_negative=True,
parameter_positive=True,
parameter_negative=False,
parameter_zero=False,
eps=1e-8)
class F(properties.Function):
def __init__(self, beta, c):
self.beta = beta
self.c = c
def f(self, l):
beta = (abs_beta > l) \
* (self.beta - l * np.sign(self.beta - l))
return maths.norm1(beta) - self.c
func = F(beta_, self.c)
l = bisection.run(func, [0.0, np.max(np.abs(beta_))])
return (abs_beta > l) * (beta_ - l * np.sign(beta_ - l))
def feasible(self, beta):
"""Feasibility of the constraint.
From the interface "Constraint".
Parameters
----------
beta : Numpy array. The variable to check for feasibility.
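Examples
--------
An illustrative sketch with a hypothetical vector; projecting an infeasible
point onto the L1 ball makes it feasible.
>>> import numpy as np
>>> from parsimony.functions.penalties import L1
>>> l1 = L1(c=1.0)
>>> beta = np.array([[0.5], [-1.5], [1.0]])
>>> l1.feasible(beta)
False
>>> l1.feasible(l1.proj(beta))
True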
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
return maths.norm1(beta_) <= self.c
class L0(properties.AtomicFunction,
properties.Penalty,
properties.Constraint,
properties.ProximalOperator,
properties.ProjectionOperator):
"""The proximal operator of the "pseudo" L0 function
f(x) = l * (||x||_0 - c),
where ||x||_0 is the L0 loss function. The constrained version has the
form
||x||_0 <= c.
Warning: Note that this function is not convex, and the regular assumptions
when using it in e.g. ISTA or FISTA will not apply. Nevertheless, it will
still converge to a local minimum if we can guarantee that we obtain a
reduction of the smooth part in each step. See e.g.:
http://eprints.soton.ac.uk/142499/1/BD_NIHT09.pdf
http://people.ee.duke.edu/~lcarin/blumensath.pdf
Parameters
----------
l : Non-negative float. The Lagrange multiplier, or regularisation
constant, of the function.
c : Float. The limit of the constraint. The function is feasible if
||x||_0 <= c. The default value is c=0, i.e. the default is a
regularisation formulation.
penalty_start : Non-negative integer. The number of columns, variables
etc., to be exempt from penalisation. Equivalently, the first index
to be penalised. Default is 0, all columns are included.
"""
def __init__(self, l=1.0, c=0.0, penalty_start=0):
self.l = max(0.0, float(l))
self.c = float(c)
self.penalty_start = max(0, int(penalty_start))
def f(self, x):
"""Function value.
From the interface "Function".
Example
-------
>>> import numpy as np
>>> from parsimony.functions.penalties import L0
>>> import parsimony.utils.maths as maths
>>>
>>> np.random.seed(42)
>>> x = np.random.rand(10, 1)
>>> l0 = L0(l=0.5)
>>> maths.norm0(x)
10
>>> l0.f(x) - 0.5 * maths.norm0(x)
0.0
>>> x[0, 0] = 0.0
>>> maths.norm0(x)
9
>>> l0.f(x) - 0.5 * maths.norm0(x)
0.0
"""
if self.penalty_start > 0:
x_ = x[self.penalty_start:, :]
else:
x_ = x
return self.l * (maths.norm0(x_) - self.c)
def prox(self, x, factor=1.0, **kwargs):
"""The corresponding proximal operator.
From the interface "ProximalOperator".
Example
-------
>>> import numpy as np
>>> from parsimony.functions.penalties import L0
>>> import parsimony.utils.maths as maths
>>>
>>> np.random.seed(42)
>>> x = np.random.rand(10, 1)
>>> l0 = L0(l=0.5)
>>> maths.norm0(x)
10
>>> np.linalg.norm(l0.prox(x) - np.array([[0. ],
... [0.95071431],
... [0.73199394],
... [0.59865848],
... [0. ],
... [0. ],
... [0. ],
... [0.86617615],
... [0.60111501],
... [0.70807258]])) < 5e-8
True
>>> l0.f(l0.prox(x))
3.0
>>> 0.5 * maths.norm0(l0.prox(x))
3.0
"""
if self.penalty_start > 0:
x_ = x[self.penalty_start:, :]
else:
x_ = x
l = self.l * factor
prox = x_ * (np.abs(x_) > l) # Hard thresholding.
prox = np.vstack((x[:self.penalty_start, :], # Unregularised variables
prox))
return prox
def proj(self, x):
"""The corresponding projection operator.
From the interface "ProjectionOperator".
Examples
--------
>>> import numpy as np
>>> from parsimony.functions.penalties import L0
>>>
>>> np.random.seed(42)
>>> x = np.random.rand(10, 1) * 2.0 - 1.0
>>> l0 = L0(c=5.0)
>>> l0.proj(x)
array([[ 0. ],
[ 0.90142861],
[ 0. ],
[ 0. ],
[-0.68796272],
[-0.68801096],
[-0.88383278],
[ 0.73235229],
[ 0. ],
[ 0. ]])
"""
if self.penalty_start > 0:
x_ = x[self.penalty_start:, :]
else:
x_ = x
if maths.norm0(x_) <= self.c:
return x
K = int(np.floor(self.c) + 0.5)
ind = np.abs(x_.ravel()).argsort()[:K]
y = np.copy(x_)
y[ind] = 0.0
if self.penalty_start > 0:
# Add the unregularised variables.
y = np.vstack((x[:self.penalty_start, :],
y))
return y
def feasible(self, beta):
"""Feasibility of the constraint.
From the interface "Constraint".
Parameters
----------
beta : Numpy array. The variable to check for feasibility.
Examples
--------
>>> import numpy as np
>>> from parsimony.functions.penalties import L0
>>>
>>> np.random.seed(42)
>>> x = np.random.rand(10, 1) * 2.0 - 1.0
>>> l0 = L0(c=5.0)
>>> l0.feasible(x)
False
>>> l0.feasible(l0.proj(x))
True
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
return maths.norm0(beta_) <= self.c
class LInf(properties.AtomicFunction,
properties.Penalty,
properties.Constraint,
properties.ProximalOperator,
properties.ProjectionOperator):
"""The proximal operator of the L-infinity function
f(x) = l * (||x||_inf - c),
where ||x||_inf is the L-infinity loss function. The constrained version
has the form
||x||_inf <= c.
Parameters
----------
l : Non-negative float. The Lagrange multiplier, or regularisation
constant, of the function.
c : Float. The limit of the constraint. The function is feasible if
||x||_inf <= c. The default value is c=0, i.e. the default is a
regularisation formulation.
penalty_start : Non-negative integer. The number of columns, variables
etc., to be exempt from penalisation. Equivalently, the first index
to be penalised. Default is 0, all columns are included.
"""
def __init__(self, l=1.0, c=0.0, penalty_start=0):
self.l = float(l)
self.c = float(c)
self.penalty_start = int(penalty_start)
def f(self, x):
"""Function value.
From the interface "Function".
Parameters
----------
x : Numpy array. The point at which to evaluate the function.
Example
-------
>>> import numpy as np
>>> from parsimony.functions.penalties import LInf
>>> import parsimony.utils.maths as maths
>>>
>>> np.random.seed(42)
>>> x = np.random.rand(10, 1)
>>> linf = LInf(l=1.1)
>>> linf.f(x) - 1.1 * maths.normInf(x)
0.0
"""
if self.penalty_start > 0:
x_ = x[self.penalty_start:, :]
else:
x_ = x
return self.l * (maths.normInf(x_) - self.c)
def prox(self, x, factor=1.0, **kwargs):
"""The corresponding proximal operator.
From the interface "ProximalOperator".
Examples
--------
>>> import numpy as np
>>> from parsimony.functions.penalties import LInf
>>> import parsimony.utils.maths as maths
>>>
>>> np.random.seed(42)
>>> x = np.random.rand(10, 1)
>>> linf = LInf(l=1.45673045, c=0.5)
>>> linf_prox = linf.prox(x)
>>> np.linalg.norm(linf_prox - np.asarray([[0.37454012],
... [0.5 ],
... [0.5 ],
... [0.5 ],
... [0.15601864],
... [0.15599452],
... [0.05808361],
... [0.5 ],
... [0.5 ],
... [0.5 ]])) < 5e-8
True
>>> linf_proj = linf.proj(x)
>>> np.linalg.norm(linf_proj - np.asarray([[0.37454012],
... [0.5 ],
... [0.5 ],
... [0.5 ],
... [0.15601864],
... [0.15599452],
... [0.05808361],
... [0.5 ],
... [0.5 ],
... [0.5 ]])) < 5e-8
True
>>> np.linalg.norm(linf_prox - linf_proj) < 5e-8
True
"""
if self.penalty_start > 0:
x_ = x[self.penalty_start:, :]
else:
x_ = x
l = self.l * factor
l1 = L1(c=l) # Project onto an L1 ball with radius c=l.
y = x_ - l1.proj(x_)
# TODO: Check if this is correct!
# Put the unregularised variables back.
if self.penalty_start > 0:
y = np.vstack((x[:self.penalty_start, :],
y))
return y
def proj(self, x):
"""The corresponding projection operator.
From the interface "ProjectionOperator".
Examples
--------
>>> import numpy as np
>>> from parsimony.functions.penalties import LInf
>>>
>>> np.random.seed(42)
>>> x = np.random.rand(10, 1) * 2.0 - 1.0
>>> linf = LInf(c=0.618)
>>> linf.proj(x)
array([[-0.25091976],
[ 0.618 ],
[ 0.46398788],
[ 0.19731697],
[-0.618 ],
[-0.618 ],
[-0.618 ],
[ 0.618 ],
[ 0.20223002],
[ 0.41614516]])
"""
if self.penalty_start > 0:
x_ = x[self.penalty_start:, :]
else:
x_ = x
if maths.normInf(x_) <= self.c:
return x
y = np.copy(x_)
y[y > self.c] = self.c
y[y < -self.c] = -self.c
# Put the unregularised variables back.
if self.penalty_start > 0:
y = np.vstack((x[:self.penalty_start, :],
y))
return y
def feasible(self, x):
"""Feasibility of the constraint.
From the interface "Constraint".
Parameters
----------
x : Numpy array. The variable to check for feasibility.
Examples
--------
>>> import numpy as np
>>> from parsimony.functions.penalties import LInf
>>>
>>> np.random.seed(42)
>>> x = np.random.rand(10, 1) * 2.0 - 1.0
>>> linf = LInf(c=0.618)
>>> linf.feasible(x)
False
>>> linf.feasible(linf.proj(x))
True
"""
if self.penalty_start > 0:
x_ = x[self.penalty_start:, :]
else:
x_ = x
return maths.normInf(x_) <= self.c
class L2(properties.AtomicFunction,
properties.Penalty,
properties.Constraint,
properties.ProximalOperator,
properties.ProjectionOperator):
"""The proximal operator of the L2 function with a penalty formulation
f(\beta) = l * (0.5 * ||\beta||_2 - c),
where ||\beta||_2 is the L2 loss function. The constrained version has
the form
0.5 * ||\beta||_2 <= c.
Parameters
----------
l : Non-negative float. The Lagrange multiplier, or regularisation
constant, of the function.
c : Float. The limit of the constraint. The function is feasible if
||\beta||_2 <= c. The default value is c=0, i.e. the
default is a regularised formulation.
penalty_start : Non-negative integer. The number of columns, variables
etc., to be exempt from penalisation. Equivalently, the first index
to be penalised. Default is 0, all columns are included.
"""
def __init__(self, l=1.0, c=0.0, penalty_start=0):
self.l = max(0.0, float(l))
self.c = float(c)
self.penalty_start = max(0, int(penalty_start))
def f(self, beta):
"""Function value.
From the interface "Function".
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
return self.l * (maths.norm(beta_) - self.c)
def prox(self, beta, factor=1.0, **kwargs):
"""The corresponding proximal operator.
From the interface "ProximalOperator".
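Example
-------
An illustrative sketch with a hypothetical vector; the norm is shrunk by l.
>>> import numpy as np
>>> from parsimony.functions.penalties import L2
>>> l2 = L2(l=1.0)
>>> np.linalg.norm(l2.prox(np.array([[3.0], [4.0]])) - np.array([[2.4], [3.2]])) < 5e-15
True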
"""
l = self.l * factor
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
norm = maths.norm(beta_)
if norm >= l:
beta_ *= (1.0 - l / norm)
else:
beta_ *= 0.0
if self.penalty_start > 0:
prox = np.vstack((beta[:self.penalty_start, :], beta_))
else:
prox = beta_
return prox
def proj(self, beta, **kwargs):
"""The corresponding projection operator.
From the interface "ProjectionOperator".
Examples
--------
>>> import numpy as np
>>> from parsimony.functions.penalties import L2
>>> np.random.seed(42)
>>> l2 = L2(c=0.3183098861837907)
>>> y1 = l2.proj(np.random.rand(100, 1) * 2.0 - 1.0)
>>> np.linalg.norm(y1) # doctest: +ELLIPSIS
0.31830988...
>>> y2 = np.random.rand(100, 1) * 2.0 - 1.0
>>> l2.feasible(y2)
False
>>> l2.feasible(l2.proj(y2))
True
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
norm = maths.norm(beta_)
# Feasible?
if norm <= self.c:
return beta
# The correction by eps is to nudge the norm just below self.c.
eps = consts.FLOAT_EPSILON
beta_ *= self.c / (norm + eps)
proj = beta_
if self.penalty_start > 0:
proj = np.vstack((beta[:self.penalty_start, :], beta_))
return proj
def feasible(self, beta):
"""Feasibility of the constraint.
From the interface "Constraint".
Parameters
----------
beta : Numpy array. The variable to check for feasibility.
Examples
--------
>>> import numpy as np
>>> from parsimony.functions.penalties import L2
>>> np.random.seed(42)
>>> l2 = L2(c=0.3183098861837907)
>>> y1 = 0.01 * (np.random.rand(50, 1) * 2.0 - 1.0)
>>> l2.feasible(y1)
True
>>> y2 = 10.0 * (np.random.rand(50, 1) * 2.0 - 1.0)
>>> l2.feasible(y2)
False
>>> y3 = l2.proj(50.0 * np.random.rand(100, 1) * 2.0 - 1.0)
>>> l2.feasible(y3)
True
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
return maths.norm(beta_) <= self.c + consts.FLOAT_EPSILON
class L2Squared(properties.AtomicFunction,
properties.Gradient,
properties.LipschitzContinuousGradient,
properties.Penalty,
properties.Constraint,
properties.ProximalOperator,
properties.ProjectionOperator):
"""The proximal operator of the squared L2 function with a penalty
formulation
f(\beta) = l * (0.5 * ||\beta||²_2 - c),
where ||\beta||²_2 is the squared L2 loss function. The constrained
version has the form
0.5 * ||\beta||²_2 <= c.
Parameters
----------
l : Non-negative float. The Lagrange multiplier, or regularisation
constant, of the function.
c : Float. The limit of the constraint. The function is feasible if
0.5 * ||\beta||²_2 <= c. The default value is c=0, i.e. the
default is a regularised formulation.
penalty_start : Non-negative integer. The number of columns, variables
etc., to be exempt from penalisation. Equivalently, the first index
to be penalised. Default is 0, all columns are included.
"""
def __init__(self, l=1.0, c=0.0, penalty_start=0):
self.l = max(0.0, float(l))
self.c = float(c)
self.penalty_start = max(0, int(penalty_start))
def f(self, beta):
"""Function value.
From the interface "Function".
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
return self.l * (0.5 * np.dot(beta_.T, beta_)[0, 0] - self.c)
def grad(self, beta):
"""Gradient of the function.
From the interface "Gradient".
Example
-------
>>> import numpy as np
>>> from parsimony.functions.penalties import L2Squared
>>>
>>> np.random.seed(42)
>>> beta = np.random.rand(100, 1)
>>> l2 = L2Squared(l=3.14159, c=2.71828)
>>> np.linalg.norm(l2.grad(beta)
... - l2.approx_grad(beta, eps=1e-4)) < 5e-10
True
>>>
>>> l2 = L2Squared(l=3.14159, c=2.71828, penalty_start=5)
>>> np.linalg.norm(l2.grad(beta)
... - l2.approx_grad(beta, eps=1e-4)) < 5e-10
True
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
grad = np.vstack((np.zeros((self.penalty_start, 1)),
self.l * beta_))
else:
beta_ = beta
grad = self.l * beta_
# approx_grad = utils.approx_grad(self.f, beta, eps=1e-4)
# print maths.norm(grad - approx_grad)
return grad
def L(self):
"""Lipschitz constant of the gradient.
"""
return self.l
def prox(self, beta, factor=1.0, **kwargs):
"""The corresponding proximal operator.
From the interface "ProximalOperator".
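Example
-------
An illustrative sketch with a hypothetical vector; the operator simply
rescales by 1 / (1 + l).
>>> import numpy as np
>>> from parsimony.functions.penalties import L2Squared
>>> l2 = L2Squared(l=1.0)
>>> np.linalg.norm(l2.prox(np.array([[2.0], [4.0]])) - np.array([[1.0], [2.0]])) < 5e-16
True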
"""
l = self.l * factor
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
if self.penalty_start > 0:
prox = np.vstack((beta[:self.penalty_start, :],
beta_ * (1.0 / (1.0 + l))))
else:
prox = beta_ * (1.0 / (1.0 + l))
return prox
def proj(self, beta, **kwargs):
"""The corresponding projection operator.
From the interface "ProjectionOperator".
Examples
--------
>>> import numpy as np
>>> from parsimony.functions.penalties import L2Squared
>>> np.random.seed(42)
>>> l2 = L2Squared(c=0.3183098861837907)
>>> y1 = l2.proj(np.random.rand(100, 1) * 2.0 - 1.0)
>>> 0.5 * np.linalg.norm(y1) ** 2 # doctest: +ELLIPSIS
0.31830988...
>>> y2 = np.random.rand(100, 1) * 2 - 1.0
>>> l2.feasible(y2)
False
>>> l2.feasible(l2.proj(y2))
True
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
sqnorm = np.dot(beta_.T, beta_)[0, 0]
# Feasible?
if 0.5 * sqnorm <= self.c:
return beta
# The correction by eps is to nudge the squared norm just below
# self.c.
eps = consts.FLOAT_EPSILON
if self.penalty_start > 0:
proj = np.vstack((beta[:self.penalty_start, :],
beta_ * np.sqrt((2.0 * self.c - eps) / sqnorm)))
else:
proj = beta_ * np.sqrt((2.0 * self.c - eps) / sqnorm)
return proj
def feasible(self, beta):
"""Feasibility of the constraint.
From the interface "Constraint".
Parameters
----------
beta : Numpy array. The variable to check for feasibility.
Examples
--------
>>> import numpy as np
>>> from parsimony.functions.penalties import L2Squared
>>> np.random.seed(42)
>>> l2 = L2Squared(c=0.3183098861837907)
>>> y1 = 0.1 * (np.random.rand(50, 1) * 2.0 - 1.0)
>>> l2.feasible(y1)
True
>>> y2 = 10.0 * (np.random.rand(50, 1) * 2.0 - 1.0)
>>> l2.feasible(y2)
False
>>> y3 = l2.proj(50.0 * np.random.rand(100, 1) * 2.0 - 1.0)
>>> l2.feasible(y3)
True
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
sqnorm = np.dot(beta_.T, beta_)[0, 0]
return 0.5 * sqnorm <= self.c + consts.FLOAT_EPSILON
class L1L2Squared(properties.AtomicFunction,
properties.Penalty,
properties.ProximalOperator):
"""The proximal operator of the L1 function with an L2 constraint.
The function is
f(x) = l1 * ||x||_1 + Indicator(||x||²_2 <= l2),
where ||.||_1 is the L1 norm and ||.||²_2 is the squared L2 norm.
Parameters
----------
l1 : Non-negative float. The Lagrange multiplier, or regularisation
constant, of the L1 norm penalty.
l2 : Non-negative float. The limit of the constraint of the squared L2
norm penalty.
penalty_start : Non-negative integer. The number of columns, variables
etc., to be exempt from penalisation. Equivalently, the first index
to be penalised. Default is 0, all columns are included.
"""
def __init__(self, l1=1.0, l2=1.0, penalty_start=0):
self.l1 = max(0.0, float(l1))
self.l2 = max(0.0, float(l2))
self.penalty_start = max(0, int(penalty_start))
def f(self, beta):
"""Function value.
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
if maths.norm(beta_) ** 2 > self.l2:
return consts.FLOAT_INF
return self.l1 * maths.norm1(beta_)
def prox(self, beta, factor=1.0, **kwargs):
"""The corresponding proximal operator.
From the interface "ProximalOperator".
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
l1 = self.l1 * factor
prox = (np.abs(beta_) > l1) * (beta_ - l1 * np.sign(beta_ - l1))
prox *= np.sqrt(self.l2 / np.dot(prox.T, prox)[0, 0])
if self.penalty_start > 0:
prox = np.vstack((beta[:self.penalty_start, :], prox))
return prox
class QuadraticConstraint(properties.AtomicFunction,
properties.Gradient,
properties.Penalty,
properties.Constraint):
"""The proximal operator of the quadratic function
f(x) = l * (x'Mx - c),
or
f(x) = l * (x'M'Nx - c),
where M or M'N is a given symmetric positive-definite matrix. The
constrained version has the form
x'Mx <= c,
or
x'M'Nx <= c
if two matrices are given.
Parameters
----------
l : Non-negative float. The Lagrange multiplier, or regularisation
constant, of the function.
c : Float. The limit of the constraint. The function is feasible if
x'Mx <= c. The default value is c=0, i.e. the default is a
regularisation formulation.
M : Numpy array. The given positive definite matrix. It is assumed that
the first penalty_start columns must be excluded.
N : Numpy array. The second matrix if the factors of the positive-definite
matrix are given. It is assumed that the first penalty_start
columns must be excluded.
penalty_start : Non-negative integer. The number of columns, variables
etc., to be exempt from penalisation. Equivalently, the first index
to be penalised. Default is 0, all columns are included.
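Example
-------
An illustrative sketch with a small hand-built diagonal matrix (the values
below are hypothetical).
>>> import numpy as np
>>> from parsimony.functions.penalties import QuadraticConstraint
>>> M = np.array([[2.0, 0.0], [0.0, 1.0]])
>>> qc = QuadraticConstraint(l=0.5, c=1.0, M=M)
>>> x = np.array([[1.0], [2.0]])
>>> abs(qc.f(x)[0, 0] - 2.5) < 5e-16  # 0.5 * (x'Mx - c) = 0.5 * (6 - 1)
True
>>> np.linalg.norm(qc.grad(x) - np.array([[2.0], [2.0]])) < 5e-16
True
>>> bool(qc.feasible(x))
False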
"""
def __init__(self, l=1.0, c=0.0, M=None, N=None, penalty_start=0):
self.l = max(0.0, float(l))
self.c = float(c)
self.penalty_start = max(0, int(penalty_start))
if self.penalty_start > 0:
self.M = M[:, self.penalty_start:] # NOTE! We slice M here!
self.N = N[:, self.penalty_start:] if N is not None else None # NOTE! We slice N here!
else:
self.M = M
self.N = N
def f(self, beta):
"""Function value.
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
if self.N is None:
#val = self.l * (np.dot(beta_.T, np.dot(self.M, beta_)) - self.c)
val = self.l * (np.dot(beta_.T, self.M.dot(beta_)) - self.c)
else:
val = self.l * (np.dot(beta_.T, self.M.T.dot(self.N.dot(beta_)))
- self.c)
#val = self.l * (np.dot(beta_.T, np.dot(self.M.T,
# np.dot(self.N, beta_))) \
# - self.c)
return val
def grad(self, beta):
"""Gradient of the function.
From the interface "Gradient".
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
if self.N is None:
grad = (2.0 * self.l) * self.M.dot(beta_)
#grad = (2.0 * self.l) * np.dot(self.M, beta_)
else:
grad = (2.0 * self.l) * self.M.T.dot(self.N.dot(beta_))
#grad = (2.0 * self.l) * np.dot(self.M.T, np.dot(self.N, beta_))
if self.penalty_start > 0:
grad = np.vstack((np.zeros((self.penalty_start, 1)), grad))
return grad
def feasible(self, beta):
"""Feasibility of the constraint.
From the interface "Constraint".
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
if self.N is None:
#bMb = np.dot(beta_.T, np.dot(self.M, beta_))
bMb = np.dot(beta_.T, self.M.dot(beta_))
else:
#bMb = np.dot(beta_.T, np.dot(self.M.T, np.dot(self.N, beta_)))
bMb = np.dot(beta_.T, self.M.T.dot(self.N.dot(beta_)))
return bMb <= self.c
class GraphNet(QuadraticConstraint,
properties.LipschitzContinuousGradient):
"""The proximal operator of the GraphNet function.
f(x) = l * sum_{(i, j) \in G} (b_i - b_j)^2,
where nodes (i, j) are connected in the graph G. A is a (sparse) matrix with
P columns in which each row contains a pair of (-1, +1) at the positions of
two connected nodes, and zeros elsewhere. Equivalently,
f(x) = l * x'A'Ax = l * sum((Ax)^2).
Parameters
----------
l : Non-negative float. The Lagrange multiplier, or regularisation
constant, of the function.
A : Numpy or (usually) scipy.sparse array. The a matrix, made of (-1, +1),
that computes all the differences between connected nodes of the graph.
penalty_start : Non-negative integer. The number of columns, variables
etc., to be exempt from penalisation. Equivalently, the first index
to be penalised. Default is 0, all columns are included.
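Example
-------
An illustrative sketch with a hypothetical three-node chain graph 0-1-2;
each row of A encodes one edge (a scipy.sparse matrix is more typical).
>>> import numpy as np
>>> from parsimony.functions.penalties import GraphNet
>>> A = np.array([[-1.0, 1.0, 0.0], [0.0, -1.0, 1.0]])
>>> gn = GraphNet(l=2.0, A=A)
>>> b = np.array([[1.0], [2.0], [4.0]])
>>> abs(gn.f(b)[0, 0] - 10.0) < 5e-15  # 2 * ((2 - 1)^2 + (4 - 2)^2)
True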
"""
def __init__(self, l=1.0, A=None, penalty_start=0):
self.l = float(l)
self.c = 0
self.M = A # for QuadraticConstraint
self.N = A # for QuadraticConstraint
self.A = A
self.penalty_start = penalty_start
self._lambda_max = None
# TODO: Redefine grad and f, without inheritance from QuadraticConstraint
# to speed up computing of f matrix-vector multiplication only needs to be
# performed once,
def L(self):
""" Lipschitz constant of the gradient.
From the interface "LipschitzContinuousGradient".
"""
if self.l < consts.TOLERANCE:
return 0.0
lmaxA = self.lambda_max()
# The (largest) Lipschitz constant of the gradient would be the operator
# norm of 2A'A, which thus is the square of the largest singular value
# of 2A'A.
return self.l * (2 * lmaxA) ** 2
def lambda_max(self):
""" Largest eigenvalue of the corresponding covariance matrix.
From the interface "Eigenvalues".
"""
# From functions.nesterov.tv.TotalVariation.L
# Note that we can save the state here since lmax(A) does not change.
# TODO: This only work if the elements of self._A are scipy.sparse. We
# should allow dense matrices as well.
if self._lambda_max is None:
from parsimony.algorithms.nipals import RankOneSparseSVD
A = self.A
# TODO: Add max_iter here!
v = RankOneSparseSVD().run(A) # , max_iter=max_iter)
us = A.dot(v)
self._lambda_max = np.sum(us ** 2)
return self._lambda_max
class RGCCAConstraint(QuadraticConstraint,
properties.ProjectionOperator):
"""Represents the quadratic function
f(x) = l * (x'(tau * I + ((1 - tau) / n) * X'X)x - c),
where tau is a given regularisation constant. The constrained version has
the form
x'(tau * I + ((1 - tau) / n) * X'X)x <= c.
Parameters
----------
l : Non-negative float. The Lagrange multiplier, or regularisation
constant, of the function.
c : Float. The limit of the constraint. The function is feasible if
x'(tau * I + ((1 - tau) / n) * X'X)x <= c. The default value is
c=0, i.e. the default is a regularisation formulation.
tau : Non-negative float. The regularisation constant.
X : Numpy array, n-by-p. The associated data matrix. The first
penalty_start columns will be excluded.
unbiased : Boolean. Whether the sample variance should be unbiased or not.
Default is True, i.e. unbiased.
penalty_start : Non-negative integer. The number of columns, variables
etc., to be exempt from penalisation. Equivalently, the first index
to be penalised. Default is 0, all columns are included.
"""
def __init__(self, l=1.0, c=0.0, tau=1.0, X=None, unbiased=True,
penalty_start=0):
self.l = max(0.0, float(l))
self.c = float(c)
self.tau = max(0.0, min(float(tau), 1.0))
if penalty_start > 0:
self.X = X[:, penalty_start:] # NOTE! We slice X here!
else:
self.X = X
self.unbiased = bool(unbiased)
self.penalty_start = max(0, int(penalty_start))
self.reset()
def reset(self):
self._U = None
self._S = None
self._V = None
def f(self, beta):
"""Function value.
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
xtMx = self._compute_value(beta_)
return self.l * (xtMx - self.c)
def grad(self, beta):
"""Gradient of the function.
From the interface "Gradient".
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
if self.unbiased:
n = float(self.X.shape[0] - 1.0)
else:
n = float(self.X.shape[0])
if self.tau < 1.0:
XtXbeta = np.dot(self.X.T, np.dot(self.X, beta_))
grad = (self.tau * 2.0) * beta_ \
+ ((1.0 - self.tau) * 2.0 / n) * XtXbeta
else:
grad = (self.tau * 2.0) * beta_
if self.penalty_start > 0:
grad = np.vstack((np.zeros((self.penalty_start, 1)), grad))
# approx_grad = utils.approx_grad(self.f, beta, eps=1e-4)
# print maths.norm(grad - approx_grad)
return grad
def feasible(self, beta):
"""Feasibility of the constraint.
From the interface "Constraint".
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
xtMx = self._compute_value(beta_)
return xtMx <= self.c
def proj(self, beta, **kwargs):
"""The projection operator corresponding to the function.
From the interface "ProjectionOperator".
Examples
--------
>>> import parsimony.functions.penalties as penalties
>>> import parsimony.utils.consts as consts
>>> import numpy as np
>>> np.random.seed(42)
>>>
>>> X = np.random.randn(10, 10)
>>> x = np.random.randn(10, 1)
>>> L2 = penalties.RGCCAConstraint(c=1.0, tau=1.0, X=X, unbiased=True)
>>> np.abs(L2.f(x) - 5.7906381220390024) < 5e-16
True
>>> y = L2.proj(x)
>>> abs(L2.f(y)) <= 2.0 * consts.FLOAT_EPSILON
True
>>> np.abs(np.linalg.norm(y) - 0.99999999999999989) < 5e-16
True
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
xtMx = self._compute_value(beta_)
if xtMx <= self.c + consts.FLOAT_EPSILON:
return beta
n, p = self.X.shape
if self.unbiased:
n_ = float(n - 1.0)
else:
n_ = float(n)
if self.tau == 1.0:
sqnorm = np.dot(beta_.T, beta_)
eps = consts.FLOAT_EPSILON
y = beta_ * np.sqrt((self.c - eps) / sqnorm)
else:
if self._U is None or self._S is None or self._V is None:
# self._U, self._S, self._V = np.linalg.svd(X_, full_matrices=0)
# numpy.linalg.svd runs faster on the transpose.
self._V, self._S, self._U = np.linalg.svd(self.X.T,
full_matrices=0)
self._V = self._V.T
self._U = self._U.T
self._S = ((1.0 - self.tau) / n_) * (self._S ** 2) + self.tau
self._S = self._S.reshape((min(n, p), 1))
atilde = np.dot(self._V, beta_)
atilde2 = atilde ** 2
ssdiff = np.dot(beta_.T, beta_)[0, 0] - np.sum(atilde2)
atilde2lambdas = atilde2 * self._S
atilde2lambdas2 = atilde2lambdas * self._S
tau2 = self.tau ** 2
from parsimony.algorithms.utils import NewtonRaphson
newton = NewtonRaphson(force_negative=True,
parameter_positive=True,
parameter_negative=False,
parameter_zero=True,
eps=consts.TOLERANCE,
max_iter=30)
class F(properties.Function,
properties.Gradient):
def __init__(self, tau, S, c):
self.tau = tau
self.S = S
self.c = c
self.precomp = None
self.precomp_mu = None
def f(self, mu):
term1 = (self.tau / ((1.0 + 2.0 * mu * self.tau) ** 2)) \
* ssdiff
self.precomp = 1.0 + (2.0 * mu) * self.S
self.precomp_mu = mu
term2 = np.sum(atilde2lambdas * (self.precomp ** -2))
return term1 + term2 - self.c
def grad(self, mu):
term1 = (-4.0 * tau2 \
/ ((1.0 + 2.0 * mu * self.tau) ** 3.0)) * ssdiff
if self.precomp is None or self.precomp_mu != mu:
self.precomp = 1.0 + (2.0 * mu) * self.S
term2 = -4.0 * np.sum(atilde2lambdas2 \
* (self.precomp ** -3.0))
self.precomp = None
self.precomp_mu = None
return term1 + term2
# if max(n, p) >= 1000:
# # A rough heuristic for finding a start value. Works well in
# # many cases, and when it does not work we have only lost one
# # iteration and restart at 0.0.
# start_mu = np.sqrt(min(n, p)) \
# / max(1.0, self.c) \
# / max(0.1, self.tau)
# elif max(n, p) >= 100:
# start_mu = 1.0
# else:
start_mu = 0.0
mu = newton.run(F(self.tau, self._S, self.c), start_mu)
# Seems to be possible because of machine precision.
if mu <= consts.FLOAT_EPSILON:
return beta
if p > n:
l2 = ((self._S - self.tau) \
* (1.0 / ((1.0 - self.tau) / n_))).reshape((n,))
a = 1.0 + 2.0 * mu * self.tau
b = 2.0 * mu * (1.0 - self.tau) / n_
y = (beta_ - np.dot(self.X.T, np.dot(self._U,
(np.reciprocal(l2 + (a / b)) \
* np.dot(self._U.T,
np.dot(self.X, beta_)).T).T))) * (1. / a)
else: # The case when n >= p
l2 = ((self._S - self.tau)
* (1.0 / ((1.0 - self.tau) / n_))).reshape((p,))
a = 1.0 + 2.0 * mu * self.tau
b = 2.0 * mu * (1.0 - self.tau) / n_
y = np.dot(self._V.T, (np.reciprocal(a + b * l2) * atilde.T).T)
if self.penalty_start > 0:
y = np.vstack((beta[:self.penalty_start, :],
y))
return y
def _compute_value(self, beta):
"""Helper function to compute the function value.
Note that beta must already be sliced!
"""
if self.unbiased:
n = float(self.X.shape[0] - 1.0)
else:
n = float(self.X.shape[0])
Xbeta = np.dot(self.X, beta)
val = self.tau * np.dot(beta.T, beta) \
+ ((1.0 - self.tau) / n) * np.dot(Xbeta.T, Xbeta)
return val[0, 0]
class RidgeSquaredError(properties.CompositeFunction,
properties.Gradient,
properties.StronglyConvex,
properties.Penalty,
properties.ProximalOperator):
"""Represents a ridge squared error penalty, i.e. a representation of
f(x) = l * ((1 / (2 * n)) * ||Xb - y||²_2 + (k / 2) * ||b||²_2),
where ||.||²_2 is the L2 norm.
Parameters
----------
l : Non-negative float. The Lagrange multiplier, or regularisation
constant, of the function.
X : Numpy array (n-by-p). The regressor matrix.
y : Numpy array (n-by-1). The regressand vector.
k : Non-negative float. The ridge parameter.
penalty_start : Non-negative integer. The number of columns, variables
etc., to be exempt from penalisation. Equivalently, the first
index to be penalised. Default is 0, all columns are included.
mean : Boolean. Whether to compute the squared loss or the mean
squared loss. Default is True, the mean squared loss.
"""
def __init__(self, X, y, k, l=1.0, penalty_start=0, mean=True):
self.l = max(0.0, float(l))
self.X = X
self.y = y
self.k = max(0.0, float(k))
self.penalty_start = max(0, int(penalty_start))
self.mean = bool(mean)
self.reset()
def reset(self):
"""Free any cached computations from previous use of this Function.
From the interface "Function".
"""
self._Xty = None
self._s2 = None
self._V = None
self._lambda_max = None
self._lambda_min = None
def f(self, x):
"""Function value.
From the interface "Function".
Parameters
----------
x : Numpy array. Regression coefficient vector. The point at which to
evaluate the function.
"""
if self.penalty_start > 0:
x_ = x[self.penalty_start:, :]
# Xx = np.dot(self.X[:, self.penalty_start:], x_)
Xx_ = np.dot(self.X, x) \
- np.dot(self.X[:, :self.penalty_start],
x[:self.penalty_start, :])
# print "penalties.RidgeSquaredError, DIFF:", \
# np.linalg.norm(Xx - Xx_)
else:
x_ = x
Xx_ = np.dot(self.X, x_)
if self.mean:
d = 2.0 * float(self.X.shape[0])
else:
d = 2.0
f = (1.0 / d) * np.sum((Xx_ - self.y) ** 2) \
+ (self.k / 2.0) * np.sum(x_ ** 2)
return self.l * f
def grad(self, x):
"""Gradient of the function at beta.
From the interface "Gradient".
Parameters
----------
x : Numpy array. The point at which to evaluate the gradient.
Examples
--------
>>> import numpy as np
>>> from parsimony.functions.losses import RidgeRegression
>>>
>>> np.random.seed(42)
>>> X = np.random.rand(100, 150)
>>> y = np.random.rand(100, 1)
>>> rr = RidgeRegression(X=X, y=y, k=3.14159265)
>>> beta = np.random.rand(150, 1)
>>> np.linalg.norm(rr.grad(beta)
... - rr.approx_grad(beta, eps=1e-4)) < 5e-8
True
"""
if self.penalty_start > 0:
x_ = x[self.penalty_start:, :]
X_ = self.X[:, self.penalty_start:]
grad = np.dot(X_.T, np.dot(X_, x_) - self.y)
del X_
else:
x_ = x
grad = np.dot((np.dot(self.X, x_) - self.y).T, self.X).T
if self.mean:
grad *= 1.0 / float(self.X.shape[0])
grad += self.k * x_
if self.penalty_start > 0:
grad = np.vstack((np.zeros((self.penalty_start, 1)),
self.l * grad))
else:
grad *= self.l
return grad
def L(self):
"""Lipschitz constant of the gradient.
From the interface "LipschitzContinuousGradient".
"""
if self._lambda_max is None:
s = np.linalg.svd(self.X, full_matrices=False, compute_uv=False)
self._lambda_max = np.max(s) ** 2
if len(s) < self.X.shape[1]:
self._lambda_min = 0.0
else:
self._lambda_min = np.min(s) ** 2
if self.mean:
self._lambda_max /= float(self.X.shape[0])
self._lambda_min /= float(self.X.shape[0])
return self.l * (self._lambda_max + self.k)
def parameter(self):
"""Returns the strongly convex parameter for the function.
From the interface "StronglyConvex".
"""
if self._lambda_min is None:
self._lambda_max = None
self.L() # Precompute
return self.l * (self._lambda_min + self.k)
def prox(self, x, factor=1.0, eps=consts.TOLERANCE, max_iter=100):
"""The proximal operator associated to this function.
Parameters
----------
x : Numpy array (p-by-1). The point at which to apply the proximal
operator.
factor : Positive float. A factor by which the Lagrange multiplier is
scaled. This is usually the step size.
eps : Positive float. This is the stopping criterion for inexact
proximal methods, where the proximal operator is approximated
numerically.
max_iter : Positive integer. This is the maximum number of iterations
for inexact proximal methods, where the proximal operator is
approximated numerically.
index : Non-negative integer. For multivariate functions, this
identifies the variable for which the proximal operator is
associated.
From the interface "ProximalOperator".
"""
# y = inv(X'.X + (k + 1 / l).I).((1 / l).x + X'.v)
n, p = self.X.shape
rho = 1.0 / self.l
if self._Xty is None:
self._Xty = np.dot(self.X.T, self.y)
v = rho * x + self._Xty
c = self.k + rho
if n >= p:
if self._s2 is None or self._V is None:
# # Ridge solution
# XtX_klI = np.dot(self.X.T, self.X)
# index = np.arange(min(XtX_klI.shape))
# XtX_klI[index, index] += c
# self._inv_XtX_klI = np.linalg.inv(XtX_klI)
_, self._s2, self._V = np.linalg.svd(self.X)
self._V = self._V.T
self._s2 = self._s2.reshape((p, 1)) ** 2
# _inv_XtX_klI = np.dot(V, np.reciprocal(c + s ** 2) * V.T)
# y = np.dot(self._inv_XtX_klI, v)
y = np.dot(self._V,
np.reciprocal(c + self._s2) * np.dot(self._V.T, v))
else: # If n < p
if self._s2 is None or self._V is None:
# # Ridge solution using the Woodbury matrix identity.
# XXt_klI = np.dot(self.X, self.X.T)
# index = np.arange(min(XXt_klI.shape))
# XXt_klI[index, index] += c
# self._inv_XtX_klI = np.linalg.inv(XXt_klI)
_, self._s2, self._V = np.linalg.svd(self.X.T)
self._V = self._V.T
self._s2 = self._s2.reshape((n, 1)) ** 2
# _inv_XtX_klI = np.dot(V, np.reciprocal(c + s ** 2) * V.T)
# y = (v - np.dot(self.X.T, np.dot(self._inv_XtX_klI,
# np.dot(self.X, v)))) / c
Xv = np.dot(self.X, v)
import numpy as np
import pandas as pd
import cv2
from config import *
from scipy.stats import multivariate_normal
import itertools
import tensorflow as tf
class Misc:
@staticmethod
def get_image_rgb(text):
img = np.zeros(shape= (Config.IMAGE_SIZE[0], Config.IMAGE_SIZE[1], 3), dtype= np.uint8)
values = [int(i) for i in text.split(' ')]
for i in xrange(Config.IMAGE_SIZE[0]):
for j in xrange(Config.IMAGE_SIZE[1]):
aux = values[j + i*Config.IMAGE_SIZE[1]]
img[i, j, :] = aux, aux, aux
return img
@staticmethod
def get_image_gray(text):
img = np.zeros(shape= (Config.IMAGE_SIZE[0], Config.IMAGE_SIZE[1]), dtype= np.uint8)
values = [int(i) for i in text.split(' ')]
for i in xrange(Config.IMAGE_SIZE[0]):
for j in xrange(Config.IMAGE_SIZE[1]):
aux = values[j + i*Config.IMAGE_SIZE[1]]
img[i, j] = aux
return img
@staticmethod
def visualize(row):
img = Misc.get_image_rgb(row)
cv2.imshow("image", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
@staticmethod
def visitor_row(file, visitor):
data=pd.read_csv(file)
for i in xrange(len(data)):
visitor(data.iloc[i])
@staticmethod
def prepare_data_from_file(file):
X = []
Y = []
def visitor(row):
X.append(Misc.get_image_gray(row.Image))
Y.append(row.iloc[:-1])
Misc.visitor_row(file, visitor)
X = np.reshape(np.array(X), [len(X), Config.IMAGE_SIZE[0], Config.IMAGE_SIZE[1], 1])
Y = np.nan_to_num(np.array(Y, dtype='float32'))
# Normalization
X_mean = np.mean(X[:])
X_std = np.std(X[:])
X_norm = (X - X_mean)/X_std
return X_norm, Y, {'mean': X_mean, 'std': X_std}
@staticmethod
def prepare_data_from_file_with_unknowns(file):
X = []
Y = []
def prepare_unknowns(row):
row = np.array(row, dtype='float32')
num = len(row) / 2
# out = x + y + unknown
arr = np.ones(shape=(3*num))
arr[:len(row)] = np.nan_to_num(row)
idx = np.argwhere(np.isnan(row))
idx = np.uint8(idx / 2)
arr[len(row)+idx] = 0
return arr
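# Illustrative sketch (hypothetical row): for row = [1.0, 2.0, nan, nan],
# num = 2 and the result is [1.0, 2.0, 0.0, 0.0, 1.0, 0.0]: the NaN pair is
# zero-filled and its corresponding flag in the trailing block is set to 0.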
def visitor(row):
X.append(Misc.get_image_gray(row.Image))
Y.append(prepare_unknowns(row.iloc[:-1]))
Misc.visitor_row(file, visitor)
X = np.reshape(np.array(X), [len(X), Config.IMAGE_SIZE[0], Config.IMAGE_SIZE[1], 1])
Y = np.array(Y, dtype='float32')
# Normalization
X_mean = np.mean(X[:])
# Ref: https://github.com/rcassani/mlp-example/blob/master/mlp_notebook.ipynb
import mlp
# from mlp import *
import numpy as np
import matplotlib.pyplot as plt
import pickle
import os.path
# === Loading Dataset
# for windows
with open(os.path.dirname(os.path.abspath(__file__)) + '/../data/SP_Dataset.pickle', 'rb') as f:
sp_dataset = pickle.load(f)
# for linux
#with open(os.path.dirname(__file__) + '/../data/SP_Dataset.pickle', 'rb') as f:
# sp_dataset = pickle.load(f)
ndata, ncol = np.shape(sp_dataset)
itrain = int(0.8 * ndata)
print('train data: ', itrain)
measure_loc = np.linspace(0, 150, 101)
X_train = []
X_valid = []
y_train = []
y_valid = []
it_split = 1
# getSPData contains SPdata, SPdata_noise, and noise_data
for param, getSPData in sp_dataset:
# split data for training and validation
if it_split <= itrain:
X_train.append(getSPData[1]) # SPdata_noise
y_train.append(param)
else:
X_valid.append(getSPData[1]) # SPdata_noise
y_valid.append(param)
it_split = it_split + 1
# Training data
X_train = np.array(X_train)
y_train = np.array(y_train)
# size X_train and y_train
print('size X_train: ', X_train.shape)
print('size y_train: ', y_train.shape)
# === Create MLP object
# input: 101 | hidden 1: 2 | hidden 2: 5 | output: 4
mlp_regression = mlp.Mlp(size_layers=[101, 2, 5, 4],
act_funct='relu', # tanh
reg_lambda=0.1,
bias_flag=True)
print(mlp_regression)
# === Training MLP object
# Training with backpropagation for a fixed number of epochs
iterations = 150 # epoch
loss = np.zeros([iterations, 1])
for ix in range(iterations):
mlp_regression.train(X_train, y_train, 1)
Y_hat = mlp_regression.predict(X_train)
y_tmp = np.argmax(Y_hat, axis=1)
# y_hat = labels[y_tmp]
y_hat = Y_hat
loss[ix] = (0.5) * np.square(y_hat - y_train).mean()
# Ploting loss vs iterations
plt.figure()
ix = np.arange(iterations)
plt.plot(ix, loss)
# Training Accuracy
Y_hat = mlp_regression.predict(X_train)
y_tmp = np.argmax(Y_hat, axis=1)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import lmfit
import copy
class LightEvent: #designates light distribution starting from some moment to the next lightevent
def __init__(self, wavelength, intensity, start_time):
#verify if thse values make any sense
self.wavelength = wavelength
self.intensity = intensity
self.start_time = start_time
class KineticData:
def __init__(self, filename, probe, irradiation, intensity = 0.0, absorbance = 0.0, t_on = None, t_off = None, probe_length = 1, irradiation_length = 1, num = None, zeroed = False, src = None, skip_header = 0, skip_footer = 0, temperature = None):
if(src == None):
#inport data
#data = np.loadtxt(filename) #previously used
data = np.genfromtxt(filename, skip_header=skip_header, skip_footer=skip_footer, dtype=float, invalid_raise = False)
self.data_t = data[:,0]
self.data_a = data[:,1]
elif(src == "Lucas"): #read data of meine beste freunde lucasso !
avg_area = 4 #!!! set the area you want to average (because in these data wavelengths are densely probed)
dead_columns = 2 #two columns are not absorbances (column with numbering + time)
tmp = pd.read_csv(filename)
columns = tmp.columns.values[dead_columns:]
columns2 = columns.astype(np.float)
positions = np.where((columns2 >= probe-avg_area/2) & (columns2 <= probe+avg_area/2))
# Filename: test_online.py
# pylint: disable=locally-disabled,C0111,R0904,C0301,C0103,W0212
from km3pipe.testing import TestCase, patch, skip, data_path
import sys
import numpy as np
from km3pipe.io.online import EventPump, TimeslicePump, SummaryslicePump
from km3pipe.io.hdf5 import HDF5Pump
from os.path import join, dirname, abspath
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, Tamas Gal and the KM3NeT collaboration."
__credits__ = []
__license__ = "MIT"
__maintainer__ = "<NAME>, <NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
ONLINE_TEST_FILE = data_path("online/events_sample.root")
ONLINE_TEST_FILE_H5 = data_path("online/events_sample.h5")
class TestEventPump(TestCase):
def setUp(self):
self.jpp_pump = EventPump(filename=ONLINE_TEST_FILE)
self.h5_pump = HDF5Pump(filename=ONLINE_TEST_FILE_H5)
def tearDown(self):
self.jpp_pump.finish()
self.h5_pump.finish()
def test_event_info(self):
n = self.jpp_pump.n_events
for i in range(n):
h5blob = self.h5_pump[i]
jppblob = self.jpp_pump[i]
for k in h5blob["EventInfo"].dtype.names:
if k not in jppblob["EventInfo"].dtype.names:
continue
ref_value = h5blob["EventInfo"][k][0]
test_value = jppblob["EventInfo"][k][0]
if isinstance(ref_value, float):
if np.isnan(ref_value):
assert np.isnan(test_value)
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test graph fallback """
import math
import numpy as np
import pytest
from mindspore import ms_function, context, Tensor, nn
context.set_context(mode=context.GRAPH_MODE)
def test_fallback_abs_integer():
"""
Feature: JIT Fallback
Description: Test abs(int) in graph mode
Expectation: No exception
"""
@ms_function
def foo():
x = -1
return abs(x)
assert foo() == 1
def test_fallback_abs_float():
"""
Feature: JIT Fallback
Description: Test abs(float) in graph mode
Expectation: No exception
"""
@ms_function
def foo():
x = -1.0
return abs(x)
assert math.isclose(foo(), 1.0, abs_tol=1e-5)
def test_fallback_abs_complex():
"""
Feature: JIT Fallback
Description: Test abs(complex) in graph mode
Expectation: No exception
"""
@ms_function
def foo():
x = complex(-1, 2)
return abs(x)
assert math.isclose(foo(), abs(-1 + 2j), abs_tol=1e-5)
def test_fallback_abs_numpy():
"""
Feature: JIT Fallback
Description: Test abs(np.array) in graph mode
Expectation: No exception
"""
@ms_function
def foo():
x = abs(np.array([1, -2, 3]))
return Tensor(x)
assert np.all(foo().asnumpy() == abs(np.array([-1, 2, -3])))
def test_fallback_abs_cell_construct_tensor():
"""
Feature: JIT Fallback
Description: Test abs(Tensor) the tensor is construct in construct function in graph mode
Expectation: No exception
"""
class TestCell(nn.Cell):
def construct(self):
x = Tensor([-1, 2])
return abs(x)
test_cell = TestCell()
assert np.all(test_cell().asnumpy() == np.array([1, 2]))
import h5py, os, time, sys
import numpy as np
from scipy.special import gammaln, digamma, multigammaln
from scipy.optimize import minimize
from scipy.stats import chi2
from sklearn_extensions.fuzzy_kmeans import FuzzyKMeans
from sklearn.metrics.pairwise import euclidean_distances
from collections import Counter
from multiprocessing.pool import Pool
#import warnings
np.seterr(all='raise')
np.set_printoptions(precision=4)
np.set_printoptions(linewidth=140)
def softplus(x):
trunc = np.log(np.finfo(np.float64).max)
min_trunc = softplus_inverse(1e-6)
if np.isscalar(x):
if x > trunc:
return x
else:
try:
v = np.log(np.exp(x) + 1)
except:
v = 0
return v
return np.log(np.exp(x) + 1)
trunc_x = np.array(x, dtype=np.float64)
trunc_x[trunc_x > trunc] = trunc
trunc_x[trunc_x < min_trunc] = min_trunc
try:
val = np.log(np.exp(trunc_x) + 1)
except:
print(trunc)
print(trunc_x)
val[trunc_x==trunc] = x[trunc_x==trunc]
val[trunc_x==min_trunc] = x[trunc_x==min_trunc]
return val
def softplus_inverse(x):
hi_trunc = np.log(np.finfo(np.float32).max)
lo_trunc = 1e-10
if np.isscalar(x):
if x > hi_trunc:
return x
elif x < lo_trunc:
return np.log(np.exp(lo_trunc) - 1)
else:
return np.log(np.exp(x) - 1)
trunc_x = np.array(x, dtype=np.float64)
trunc_x[trunc_x > hi_trunc] = hi_trunc
trunc_x[trunc_x < lo_trunc] = lo_trunc
val = np.log(np.exp(trunc_x) - 1)
val[trunc_x==hi_trunc] = x[trunc_x==hi_trunc]
return val
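# Quick sanity check (illustrative): away from the truncation thresholds,
# softplus and softplus_inverse are numerical inverses, e.g.
# abs(softplus_inverse(softplus(3.0)) - 3.0) is on the order of machine precision.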
def softplus_derivative(x):
trunc = np.log(np.finfo(np.float64).max)
if np.isscalar(x):
if x > trunc:
return 1.0
else:
return np.float64(np.exp(x) / (1. + np.exp(x)))
rv = np.ones(x.shape)
rv[x <= trunc] = np.float64(np.exp(x[x <= trunc]) / (1. + np.exp(x[x <= trunc])))
return rv
def covar(a, b):
# (s, M)
v = (a - sum(a)/a.shape[0]) * (b - sum(b)/b.shape[0])
return sum(v) / v.shape[0]
def var(a):
rv = covar(a, a)
return np.maximum(1e-300, rv)
def logfactorial(x):
if np.isscalar(x):
return np.log(np.arange(1,x+1)).sum()
rv = np.zeros(x.shape)
if len(rv.shape) == 1:
for i in range(len(x)):
rv[i] = np.log(np.arange(1,x[i]+1)).sum()
else:
for i in range(rv.shape[0]):
for j in range(rv.shape[1]):
rv[i,j] = np.log(np.arange(1,x[i,j]+1)).sum()
return rv
# the following two functions from https://gist.github.com/jfrelinger/2638485
def invwishartrand(nu, phi):
return np.linalg.inv(wishartrand(nu, np.linalg.inv(phi)))
def wishartrand(nu, phi):
dim = phi.shape[0]
chol = np.linalg.cholesky(phi)
foo = np.zeros((dim,dim))
for i in range(dim):
for j in range(i+1):
if i == j:
foo[i,j] = np.sqrt(chi2.rvs(nu-(i+1)+1))
else:
foo[i,j] = np.random.normal(0,1)
return np.dot(chol, np.dot(foo, np.dot(foo.T, chol.T)))
def logGamma(x, mu, alpha):
x = np.maximum(x, 1e-6)
shape = mu / alpha
scale = alpha
return ((shape-1)*np.log(x) - (x/scale) - \
gammaln(shape) - shape*np.log(scale))
def logiGamma(x, alpha, beta):
return alpha * np.log(beta) - gammaln(alpha) - \
(alpha-1) * np.log(x) - beta / x
def logNormal(x, loc, var):
diff = x - loc
thresh = np.sqrt(np.finfo(np.float64).max / 2)
diff[diff > thresh] = thresh
return -0.5 * (np.log(2 * np.pi * var) + \
diff ** 2 / var)
def logMVNormal(x, mu, Sigma, iSigma, detSigma, scale):
try:
return -0.5 * (detSigma - Sigma.shape[0] * np.log(scale) + \
np.matmul(np.matmul(np.transpose(x - mu), \
iSigma * scale), x - mu))
except:
print(Sigma)
raise
def logiWishart(x, df, scale):
d = scale.shape[0]
sign, logdetScale = np.linalg.slogdet(scale)
sign, logdetX = np.linalg.slogdet(x)
rv = (df * 0.5) * logdetScale - \
(df * d * 0.5) * np.log(2) - \
multigammaln(df * 0.5, d) - \
((df + d + 1) * 0.5) * logdetX - \
0.5 * np.matmul(scale, np.linalg.inv(x)).diagonal().sum()
return rv
def logBetaShape(x, alpha, beta):
x = np.minimum(np.maximum(x, 1e-10), 1-1e-10)
alpha = np.maximum(alpha, 1e-10)
beta = np.maximum(beta, 1e-10)
return (alpha - 1.0) * np.log(x)
from bresenham import bresenham
from scipy.spatial import Voronoi
import numpy as np
from queue import PriorityQueue
import networkx as nx
def closest_node(graph, current_position):
'''
Compute the closest node in the graph to the current position
'''
closest_node = None
dist = 100000
xy_position = (current_position[0], current_position[1])
for p in graph.nodes:
d = heuristic(xy_position, p)
if d < dist:
closest_node = p
dist = d
return closest_node
def create_grid_and_edges(data, drone_altitude, safety_distance):
'''
Create a grid representation of a 2D configuration space and a Voronoi Graph
'''
# minimum and maximum north coordinates
north_min = np.floor(np.min(data[:, 0] - data[:, 3]))
# %load ../../src/models/model_utils.py
# %%writefile ../../src/models/model_utils.py
"""
Author: <NAME>
Created in the scope of my PhD
"""
import pandas as pd
import numpy as np
import sklearn as sk
import math
import itertools
from scipy import stats
from sklearn.model_selection import KFold
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LinearRegression, Ridge, Lasso, HuberRegressor
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.svm import SVC, SVR
from sklearn.preprocessing import PolynomialFeatures
def CreateRankedLabels(a):
pw = list(itertools.combinations(a,2))
labels = [1 if item[0]>item[1] else -1 for item in pw]
return labels
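# Illustrative sketch (hypothetical scores): CreateRankedLabels([3, 1, 2])
# compares the pairs (3, 1), (3, 2) and (1, 2) and returns [1, 1, -1].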
def GetParameterSet(parLabel, parRange):
"""Retrieve a set of parameter values used for training of a model in sklearn.
Parameters
-----------
parLabel : 1-dimensional numpy array (str)
numpy array holding a set of parameter labels. Valid labels include:
[alpha, gamma, C, coef0, epsilon, max_depth, min_samples, max_features]
parRange : 1-dimensional numpy array (int)
numpy array with the amount of parameters returned for every parameter label.
parLabel and parRange must be of the same dimension.
Returns
--------
parSet : Dictionary
Dictionary containing a set of parameters for every label
"""
if parLabel[0] in ['max_depth','min_samples_split', 'max_features']:
parameters = [np.zeros(parRange[u],dtype=np.int) for u in range(len(parRange))]
else:
parameters = [np.zeros(parRange[u]) for u in range(len(parRange))]
for i in range(len(parLabel)):
if parLabel[i] == "alpha":
parameters[i][:] = [math.pow(10,(u - np.around(parRange[i]/2))) for u in range(parRange[i])]
elif parLabel[i] == "gamma":
parameters[i][:] = [math.pow(10,(u - np.around(parRange[i]/2))) for u in range(parRange[i])]
elif parLabel[i] == "C":
parameters[i][:] = [math.pow(10,(u - np.around(parRange[i]/2))) for u in range(parRange[i])]
elif parLabel[i] == "coef0":
parameters[i][:] = [math.pow(10,(u - np.around(parRange[i]/2))) for u in range(parRange[i])]
elif parLabel[i] == "epsilon":
parameters[i][:] = [0+2/parRange[i]*u for u in range(parRange[i])]
elif parLabel[i] == "max_depth":
parameters[i][:] = [int(u+1) for u in range(parRange[i])]
elif parLabel[i] == 'min_samples_split':
parameters[i][:] = [int(u+2) for u in range(parRange[i])]
elif parLabel[i] == 'max_features':
parameters[i][:] = [int(u+2) for u in range(parRange[i])]
else:
            raise ValueError("Not a valid parameter")
parSet = {parLabel[u]:parameters[u] for u in range(len(parLabel))}
return parSet
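# Hedged usage sketch:
#     parSet = GetParameterSet(np.array(['alpha', 'gamma']), np.array([4, 4]))
#     # -> {'alpha': [0.01, 0.1, 1.0, 10.0], 'gamma': [0.01, 0.1, 1.0, 10.0]} (as numpy arrays)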
def EvaluateParameterSet(X_train, X_test, y_train, y_test, parModel, parSet):
"""Evaluate the scores of a set of parameters for a given model.
Parameters
-----------
X_train:
Training dataset features
X_test:
Test dataset features
y_train
Training dataset labels
y_test
Test dataset labels
parModel: Dictionary
parSet : Dictionary
Dictionary holding the parameter label and values over which the model has to be
evaluated. This can be created through the function GetParameterSet.
Accepted keys are:
[alpha, gamma, C, coef0, epsilon, max_depth, min_samples, max_features]
Returns
--------
scores: 1-dimensional numpy array: int
Fitted scores of the model with each of the parametersSets
optimalPar: int
Optimal parameter value for a given parameter label
"""
    scores = []
    # evaluate over the first (and typically only) parameter label in parSet
    parLabel = list(parSet.keys())[0]
    for i in range(len(parSet[parLabel])):
        parSetIt = {parLabel: parSet[parLabel][i]}
        model = SelectModel(**parModel, **parSetIt)
        model.fit(X_train, y_train)
        scores = np.append(scores, model.score(X_test, y_test))
optimalPar = parSet[parLabel][np.argmax(scores)]
return scores, optimalPar
def EvaluateScore(X_train, X_test, y_train, y_test, parModel, scoring='default', pw=False):
"""Evaluates the score of a model given for a given test and training data
Parameters
-----------
X_train, X_test: DataFrame
Test and training data of the features
y_train, y_test: 1-dimensional numpy array
Test and training data of the labels
    parModel: dictionary
        Parameters indicating the model and some of its features
    scoring: str
        Scoring method: 'default' (model.score), 'kt' (Kendall tau) or 'spearman'
    pw: Boolean
        Whether the labels are pairwise (ranked) labels
Returns
--------
score: int
Score of the test data on the model
y_pred: 1-dimensional array
An array giving the predicted labels for a given test set
"""
model = SelectModel(**parModel)
model.fit(X_train,y_train)
y_pred = model.predict(X_test)
if scoring == 'default':
score = model.score(X_test,y_test)
elif scoring == 'kt':
if pw is True:
score = KendallTau(y_pred, y_test)
if pw is False:
y_pred_pw = CreateRankedLabels(y_pred)
y_test_pw = CreateRankedLabels(y_test)
score = KendallTau(y_pred_pw, y_test_pw)
elif scoring == 'spearman':
score = stats.spearmanr(y_test, y_pred)[0]
else:
raise("Scoring type not defined. Possible options are: 'default', 'kt', and 'spearman'")
return score, y_pred
def KendallTau(y_pred, y_true):
a = np.array(y_pred)
b = np.array(y_true)
n = len(y_pred)
score = (np.sum(a==b)-np.sum(a!=b))/n
return score
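# Worked example (illustrative only): with three pairwise labels, two matches and one
# mismatch give (2 - 1) / 3:
#     KendallTau([1, -1, 1], [1, 1, 1])   # -> 0.333...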
def LearningCurveInSample(dfDataset, featureBox, y ,parModel, scoring='default', k=5, pw=False, step=1):
"""Calculates the learning curve of a dataset for a given model
Parameters
-----------
dfDataset: Dataframe
Dataframe holding sequences,
    featureBox: Dataframe
        Feature matrix for the full dataset
    y: 1-dimensional numpy array
        Labels for the full dataset
    parModel: Dictionary
        Parameters indicating the model and some of its features
    k: int
        Number of cross-validation folds
    pw: Boolean
        Whether the labels are pairwise (ranked) labels
    step: int
        Number of training examples added between learning-curve points
    Returns
    --------
    scores: 2-dimensional numpy array
        Learning-curve scores, with one row per cross-validation fold and one
        column per training-set size.
"""
X = featureBox.values
if pw is True:
temp = np.unique(dfDataset[['ID_1', 'ID_2']].values)
        dfId = pd.Series(temp[:len(temp) - (len(temp) % k)])
else:
        dfId = dfDataset['ID'][:len(dfDataset) - (len(dfDataset) % k)]
lenId = len(dfId)
Id = dfId.values
indexId = np.array(range(lenId))
scores = np.array([])
it=0
for i in range(k):
boolTest = np.logical_and(indexId>=i*lenId/k,indexId<(i+1)*lenId/k)
test = Id[boolTest]
train = Id[np.invert(boolTest)]
if pw is True:
indexTest = (dfDataset['ID_1'].isin(test) | dfDataset['ID_2'].isin(test)).values
else:
indexTest = dfDataset['ID'].isin(test).values
dfDatasetTrain = dfDataset[np.invert(indexTest)]
X_train, y_train = featureBox[np.invert(indexTest)], y[np.invert(indexTest)]
X_test, y_test = featureBox[indexTest], y[indexTest]
for j in range((len(train)-5)//step):
print("\rProgress {:2.1%}".format(it/k+(j/len(train)/k*step)), end='')
trainInner = train[:(j*step)+5]
if pw is True:
indexTrainInner = (dfDatasetTrain['ID_1'].isin(trainInner) & dfDatasetTrain['ID_2'].isin(trainInner)).values
else:
indexTrainInner = (dfDatasetTrain['ID'].isin(trainInner)).values
X_trainInner, y_trainInner = X_train[indexTrainInner], y_train[indexTrainInner]
score, y_pred = EvaluateScore(X_trainInner, X_test, y_trainInner, y_test, {**parModel}, scoring, pw)
scores = np.append(scores,score)
it+=1
scores = scores.reshape((k,-1))
return scores
def LearningCurveInSampleEnriched(dfDataset, featureBox, enrichBox, y, y_enrich ,parModel,
scoring='default', k=5, pw=True, step=1):
"""Calculates the learning curve of an enriched dataset for a given model
Parameters
-----------
dfDataset: Dataframe
Dataframe holding sequences,
    featureBox: Dataframe
        Feature matrix for the full dataset
    enrichBox: Dataframe or numpy array
        Feature matrix of the enrichment examples that are always added to the
        training set
    y: 1-dimensional numpy array
        Labels for the full dataset
    y_enrich: 1-dimensional numpy array
        Labels of the enrichment examples
    parModel: Dictionary
        Parameters indicating the model and some of its features
    k: int
        Number of cross-validation folds
    pw: Boolean
        Whether the labels are pairwise (ranked) labels
    step: int
        Number of training examples added between learning-curve points
    Returns
    --------
    scores: 2-dimensional numpy array
        Learning-curve scores, with one row per cross-validation fold and one
        column per training-set size.
"""
if pw is True:
temp = np.unique(dfDataset[['ID_1', 'ID_2']].values)
        dfId = pd.Series(temp[:len(temp) - (len(temp) % k)])
else:
        dfId = dfDataset['ID'][:len(dfDataset) - (len(dfDataset) % k)]
lenId = len(dfId)
Id = dfId.values
indexId = np.array(range(lenId))
scores = np.array([])
it=0
for i in range(k):
boolTest = np.logical_and(indexId>=i*lenId/k,indexId<(i+1)*lenId/k)
test = Id[boolTest]
train = Id[np.invert(boolTest)]
if pw is True:
indexTest = (dfDataset['ID_1'].isin(test) | dfDataset['ID_2'].isin(test)).values
else:
indexTest = dfDataset['ID'].isin(test).values
dfDatasetTrain = dfDataset[np.invert(indexTest)]
X_train = featureBox[np.invert(indexTest)]
y_train = y[np.invert(indexTest)]
X_test, y_test = featureBox[indexTest], y[indexTest]
for j in range((len(train))//step):
print("\rProgress {:2.1%}".format(it/k+(j/len(train)/k*step)), end='')
trainInner = train[:(j*step)]
if pw is True:
indexTrainInner = (dfDatasetTrain['ID_1'].isin(trainInner) & dfDatasetTrain['ID_2'].isin(trainInner)).values
else:
indexTrainInner = (dfDatasetTrain['ID'].isin(trainInner)).values
X_trainInner = np.vstack((enrichBox,X_train[indexTrainInner]))
y_trainInner = np.append(y_enrich, y_train[indexTrainInner])
score, y_pred = EvaluateScore(X_trainInner, X_test, y_trainInner, y_test, {**parModel}, scoring, pw)
scores = np.append(scores,score)
it+=1
scores = scores.reshape((k,-1))
return scores
def LearningCurveOutOfSample(dfDataset, featureBox, y , dataList, parModel, scoring='default', pw=False, step=1):
"""Calculates the learning curve of a dataset for a given model
Parameters
-----------
dfDataset: Dataframe
Dataframe holding sequences,
    featureBox: Dataframe
        Feature matrix for the full (in-sample) training dataset
    y: 1-dimensional numpy array
        Labels for the full (in-sample) training dataset
    dataList: list
        List of held-out datasets; each entry holds the dataset name, its
        feature matrix (index 1) and its labels (index 2)
    parModel: Dictionary
        Parameters indicating the model and some of its features
    pw: Boolean
        Whether the labels are pairwise (ranked) labels
    step: int
        Number of training examples added between learning-curve points
    Returns
    --------
    scores: 2-dimensional numpy array
        Learning-curve scores, with one row per dataset in dataList and one
        column per training-set size.
"""
if pw is True:
temp = np.unique(dfDataset[['ID_1', 'ID_2']].values)
dfId = pd.Series(temp)
else:
dfId = dfDataset['ID']
lenId = len(dfId)
Id = dfId.values
indexId = np.array(range(lenId))
scores = np.zeros(shape=(len(dataList),(lenId-5)//step))
for i in range((lenId-5)//step):
print("\rProgress {:2.1%}".format(i/lenId*step), end='')
train = Id[:((i*step)+5)]
if pw is True:
indexTrain = (dfDataset['ID_1'].isin(train) & dfDataset['ID_2'].isin(train)).values
else:
indexTrain = dfDataset['ID'].isin(train).values
X_train, y_train = featureBox[indexTrain], y[indexTrain]
for j in range(len(dataList)):
score, y_pred = EvaluateScore(X_train, dataList[j][1].values, y_train, dataList[j][2],
{**parModel}, scoring, pw)
scores[j,i] = score
return scores
def LearningCurveOutOfSampleEnriched(dfDataset, featureBox, enrichBox, y, y_enrich, dataOutList,
parModel, scoring='default', pw=True, step=1):
if pw is True:
temp = np.unique(dfDataset[['ID_1', 'ID_2']].values)
dfId = pd.Series(temp)
else:
dfId = dfDataset['ID']
lenId = len(dfId)
Id = dfId.values
indexId = np.array(range(lenId))
scores = np.zeros(shape=(len(dataOutList),(lenId)//step))
for i in range((lenId)//step):
print("\rProgress {:2.1%}".format(i/lenId*step), end='')
train = Id[:(i*step)]
if pw is True:
indexTrain = (dfDataset['ID_1'].isin(train) & dfDataset['ID_2'].isin(train)).values
else:
indexTrain = dfDataset['ID'].isin(train).values
        X_train = np.vstack((enrichBox, featureBox[indexTrain]))
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from lib5c.util.plotting import plotter
from lib5c.plotters.colormaps import get_colormap
from hic3defdr.util.matrices import dilate, select_matrix
from hic3defdr.util.thresholding import threshold_and_cluster, size_filter
from hic3defdr.util.clusters import clusters_to_coo
from hic3defdr.util.classification import classify
from hic3defdr.plotting.heatmap import plot_heatmap
@plotter
def plot_grid(i, j, w, row, col, raw, scaled, mu_hat_alt, mu_hat_null, qvalues,
disp_idx, loop_idx, design, fdr, cluster_size, vmax=100,
fdr_vmid=0.05,
color_cycle=('blue', 'green', 'purple', 'yellow', 'cyan', 'red'),
despine=False, **kwargs):
"""
Plots a combination visualization grid focusing on a specific pixel on a
specific chromosome, combining heatmaps, cluster outlines, and stripplots.
Parameters
----------
i, j : int
The row and column index of the pixel to focus on.
w : int
The size of the heatmap will be ``2*w + 1`` bins in each dimension.
row, col : np.ndarray
The row and column indices corresponding to the rows of the ``raw`` and
``scaled`` matrices.
raw, scaled : np.ndarray
The raw and scaled data for each pixel (rows) and each replicate
(columns).
mu_hat_alt, mu_hat_null : np.ndarray
The estimated mean parameter under the alternate and null model,
respectively. First dimension is pixels for which dispersion was
estimated, whose row and column coordinates in the complete square
matrix are given by ``row[disp_idx]`` and ``col[disp_idx]``,
respectively. Columns of ``mu_hat_alt`` correspond to conditions, while
``mu_hat_null`` has no second dimension.
qvalues : np.ndarray
Vector of q-values called per pixel whose dispersion was estimated and
which lies in a loop. The row and column coordinates in the complete
square matrix are given by ``row[disp_idx][loop_idx]`` and
``col[disp_idx][loop_idx]``, respectively.
disp_idx : np.ndarray
Boolean matrix indicating which pixels in ``zip(row, col)`` had their
dispersion estimated.
loop_idx : np.ndarray
Boolean matrix indicating which pixels in
``zip(row[disp_idx], col[disp_idx])`` lie within loops.
design : pd.DataFrame
Pass a DataFrame with boolean dtype whose rows correspond to replicates
and whose columns correspond to conditions. Replicate and condition
names will be inferred from the row and column labels, respectively.
fdr : float
The FDR threshold to use when outlining clusters.
cluster_size : int
The cluster size threshold to use when outlining clusters.
vmax : float
The maximum of the colorscale to use when plotting normalized
heatmaps.
fdr_vmid : float
The FDR value at the middle of the colorscale used for plotting the
q-value heatmap.
color_cycle : list of matplotlib colors
The color cycle to use over conditions.
kwargs : kwargs
Typical plotter kwargs.
Returns
-------
pyplot axis, grid of pyplot axes, function
The first pyplot axis returned is injected by ``@plotter``. The grid of
pyplot axes is the second return value from the call to
``plt.subplots()`` that is used to create the grid. The function takes
two args, an FDR and a cluster size, and redraws the cluster outlines
using the new parameters.
"""
# precompute some things
max_reps = np.max(np.sum(design, axis=0))
idx = np.where((row[disp_idx] == i) & (col[disp_idx] == j))[0][0]
extent = [-0.5, 2 * w + 0.5, -0.5, 2 * w + 0.5]
rs, cs = slice(i - w, i + w + 1), slice(j - w, j + w + 1)
f = raw[disp_idx] / scaled[disp_idx]
n = max(row.max(), col.max()) + 1
mu_hat_alt_wide = np.dot(mu_hat_alt, design.values.T)
    mean = np.dot(scaled, design)
# -*- coding: utf-8 -*-
"""
Created on Fri May 2 11:48:55 2014
@author: rlabbe
"""
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import numpy.random as random
import matplotlib.pyplot as plt
''' should this be a class? seems like both sense and update are very
problem specific
'''
def bar_plot(pos, ylim=(0,1), title=None):
plt.cla()
ax = plt.gca()
x = np.arange(len(pos))
ax.bar(x, pos, color='#30a2da')
if ylim:
plt.ylim(ylim)
plt.xticks(x+0.4, x)
if title is not None:
plt.title(title)
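# Hedged usage sketch:
#     bar_plot(np.array([0.1, 0.4, 0.5]), title='belief after one update')
#     plt.show()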
class DiscreteBayes1D(object):
def __init__(self, world_map, belief=None):
self.world_map = copy.deepcopy(world_map)
self.N = len(world_map)
if belief is None:
# create belief, make all equally likely
self.belief = np.empty(self.N)
self.belief.fill (1./self.N)
else:
self.belief = copy.deepcopy(belief)
# This will be used to temporarily store calculations of the new
# belief. 'k' just means 'this iteration'.
self.belief_k = np.empty(self.N)
assert self.belief.shape == self.world_map.shape
def _normalize (self):
s = sum(self.belief)
self.belief = self.belief/s
def sense(self, Z, pHit, pMiss):
for i in range (self.N):
hit = (self.world_map[i] ==Z)
self.belief_k[i] = self.belief[i] * (pHit*hit + pMiss*(1-hit))
# copy results to the belief vector using swap-copy idiom
self.belief, self.belief_k = self.belief_k, self.belief
self._normalize()
def update(self, U, kernel):
N = self.N
kN = len(kernel)
width = int((kN - 1) / 2)
self.belief_k.fill(0)
for i in range(N):
for k in range (kN):
index = (i + (width-k)-U) % N
#print(i,k,index)
self.belief_k[i] += self.belief[index] * kernel[k]
# copy results to the belief vector using swap-copy idiom
self.belief, self.belief_k = self.belief_k, self.belief
def add_noise (Z, count):
n= len(Z)
for i in range(count):
j = random.randint(0,n)
        Z[j] = random.randint(0, 2)
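# Hedged usage sketch for the filter above (values are illustrative, not from the
# original author):
#     world = np.array([1, 0, 1, 0, 0])                 # 1 = door, 0 = wall
#     f = DiscreteBayes1D(world)
#     f.sense(Z=1, pHit=0.8, pMiss=0.2)                 # robot senses a door
#     f.update(U=1, kernel=np.array([0.1, 0.8, 0.1]))   # move one cell right with motion noise
#     bar_plot(f.belief, title='posterior belief')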
import random
from typing import List, Dict, Tuple
import math
import numpy as np
from synthetic_data.observation import Observation
import os
import pickle
import time
from llr.llr import *
# Created by <NAME> 29 November 2017
class Simulator:
def __init__(self, n_users: int, user_features: List[int],
n_items: int, item_features: int,
bias: int,
users_distribution: str = "zipf",
items_distribution: str = "zipf",
read_cache_dir: str = None,
save_cache_dir: str = None,
timestamp: bool=True,
tout: bool=True) -> None:
"""Produce a list of observations --users who "buy" items.
e.g.
```
s = Simulator(n_users=101, user_features=0, n_items=1500, item_features=10, bias=1.0)
s.run()
```
:param int n_users: Number of users
:param List[int] user_features: [feature1_n_values, feature2... ]
:param int n_items: Number of items
:param List[int] item_features: as for users
:param int bias: how similarity influences. If 0, at all. If 1, p(item after an item sim=-1)=0
:param int timestamp: unix-like timestamp (in seconds)
:return List[Tuple3]: list of observations (user_id, item_id, timestamp)
"""
self.user_buying_dict = {} # {user: [(item, timestamp), (), ...], ...}
self.observations_list = []
self._user_features = user_features
self._item_features = item_features
self.n_users = n_users
self.n_items = n_items
self._reset_cooccurrences_matrices()
self._reset_sequentials_matrices()
self.tout = tout
assert read_cache_dir is None or save_cache_dir is None, \
"saving and reading the cache at the same time does not make sense"
self.read_cache_dir = read_cache_dir
self.save_cache_dir = save_cache_dir
if bias >= 0:
self.bias = np.float32(bias)
else:
raise ValueError("Bias must be equal or bigger than 0")
# creating users
self.users = self.make_population(n=n_users,
features=user_features,
population_name="user")
# creating items
self.items = self.make_population(n=n_items,
features=item_features,
population_name="item")
print("INFO: creating user probability weights")
# probability of getting a user given a previous user
self._user_probability_weights = self.get_probability_weights(population=self.users,
cache_name="user",
distribution=users_distribution)
print("INFO: creating item probability weights")
# probability of getting a item given a previous item
self._item_probability_weights = self.get_probability_weights(population=self.items,
cache_name="item",
distribution=items_distribution)
# track times
self._cooccurrence_time = 0
self._sequential_time = 0
self._observations_time = 0
# to be updated each time we change observations
self.__hash = tuple(self.observations_list).__hash__()
self.__max_information = None
self.__recommender_information = None
# avoid computing twice the info for the same observations
self.__last_max_information = self.__hash
self.__last_recommender_information = self.__hash
if timestamp:
self._time_unites = 86400 # one day
else:
self._time_unites = 1
def _reset_cooccurrences_matrices(self):
# user has item
self.user_item_present = np.zeros(self.n_users * self.n_items).reshape((self.n_users, self.n_items))
# user does not have item
self.user_item_absent = np.ones(self.n_users * self.n_items).reshape((self.n_users, self.n_items))
# co-occurrence matrices
self.items_cooccurrence11 = np.zeros(self.n_items * self.n_items).reshape((self.n_items, self.n_items))
self.items_cooccurrence10 = np.zeros(self.n_items * self.n_items).reshape((self.n_items, self.n_items))
self.items_cooccurrence01 = np.zeros(self.n_items * self.n_items).reshape((self.n_items, self.n_items))
self.items_cooccurrence00 = np.zeros(self.n_items * self.n_items).reshape((self.n_items, self.n_items))
        self.items_llr = np.zeros(self.n_items * self.n_items).reshape((self.n_items, self.n_items))
"""
Source code please refer to the following:
http://web.stanford.edu/~hrhakim/NMF/code.html
Description:
This file provides the functions used in implementing the proposed method
for Non-negative matrix factorization in the paper,
"Non-negative Matrix Factorization via Archetypal Analysis".
Link = https://arxiv.org/abs/1705.02994
Re-implemented into class-based code by:
<NAME> (<EMAIL>)
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import nnls
from scipy.optimize import linprog
from hw3.libs.common.blend_dataset import BlendImgDataset
class NMF(BlendImgDataset):
def __init__(self, n_comp, o_img_size, shape, N, p):
self.n_comp = n_comp
super().__init__(o_img_size, df_dataset=True, shape=shape, N=N, p=p, all=True)
"""
Please go to the paper for the detail of the algorithm.
"""
def run(self, maxiter, delta, threshold, c1, c2, verbose, oracle):
self.W, self.H, self.L, self.Err = self.acc_palm_nmf(self.img_data.values, r=self.n_comp, maxiter=maxiter, delta=delta, threshold=threshold,
c1=c1, c2=c2, verbose=verbose, oracle=oracle)
def plot_result(self):
plt.figure()
plt.suptitle("Illustration of NMF features =%s from Zw (DR of X)" % self.n_comp)
for i in range(0, self.n_comp):
plt.subplot(1, 4, i + 1)
Vt_row = self.H[i, :].reshape(self.shape) # Reconstruct row into image for checkout
plt.title("H{}".format(i), size=8)
plt.imshow(Vt_row, cmap='gray') ## Display the image
plt.axis('off')
plt.tight_layout()
plt.show()
def D_distance(self, H1, H2):
# This function computes the 'L'-distance between the two set of vectors collected in the rows of H1 and H2. In our paper notation, this is $\mathscr{L}(H_1, H_2)$.
n1 = H1.shape[0]
n2 = H2.shape[0]
D = 0
for i in range(0, n1):
d = (np.linalg.norm(H1[i, :] - H2[0, :])) ** 2
for j in range(1, n2):
d = min(d, (np.linalg.norm(H1[i, :] - H2[j, :]) ** 2))
D = D + d
return D
# not used yet, in this implementation
def generate_weights(self, n, r, alpha, n_f, deg_prob):
# This function generates 'n' weight vectors in r-dimensions, distributed as Dirichlet(alpha, alpha, ..., alpha). 'n_f' is the number of weight vector which have zero components (induce points that lie on the faces) and 'deg_prob' is the distribution of the support size of these weight vectors. Namely, these weight vectors are distributed as Dirichlet over the set of nonzero entries which is a uniformly distributed set with a size randomly generated according to 'deg_prob'.
W = np.zeros((n, r))
for i in range(0, n_f):
deg_cdf = np.cumsum(deg_prob)
t = np.random.uniform(0, 1)
ind = np.nonzero(deg_cdf > t)
deg = np.min(ind) + 1
dirich_param = alpha * np.ones(deg)
w = np.random.dirichlet(dirich_param)
vertices = np.random.permutation(r)
vertices = vertices[0:deg]
W[i, vertices] = np.random.dirichlet(dirich_param)
for i in range(n_f, n):
dirich_param = alpha * np.ones(r)
W[i, :] = np.random.dirichlet(dirich_param)
return W
def l2distance(self, x, U, x0):
# This function computes <x-x0, (U^T*U)*(x-x0)>.
lx = np.linalg.norm(x - x0) ** 2
lpx = np.linalg.norm(np.dot(U, x - x0)) ** 2
return (lx - lpx)
def plot_H(self, H, col, type):
# This function plots the 'archetypes', (rows of 'H', when they are 2-dimensional) in 'col' color using 'type' as plot options.
v0 = H[:, 0]
v0 = np.append(v0, H[0, 0])
v1 = H[:, 1]
v1 = np.append(v1, H[0, 1])
hplt, = plt.plot(v0, v1, type, color=col, markersize=8, linewidth=3)
return hplt
def plot_data(self, X, col):
# This function plots the 'data points', (rows of 'X', when they are 2-dimensional) in 'col' color.
plt.plot(X[:, 0], X[:, 1], 'o', color=col, markersize=5)
def initH(self, X, r):
# This function computes 'r' initial archetypes given rows of 'X' as the data points. The method used here is the successive projections method explained in the paper.
n = X.shape[0]
d = X.shape[1]
H = np.zeros((r, d))
maxd = np.linalg.norm(X[0, :])
imax = 0
for i in range(1, n):
newd = np.linalg.norm(X[i, :])
if (newd > maxd):
imax = i
maxd = newd
H[0, :] = X[imax, :]
maxd = np.linalg.norm(X[0, :] - H[0, :])
imax = 0
for i in range(1, n):
newd = np.linalg.norm(X[i, :] - H[0, :])
if (newd > maxd):
imax = i
maxd = newd
H[1, :] = X[imax, :]
for k in range(2, r):
M = H[1:k, :] - np.outer(np.ones(k - 1), H[0, :])
[U, s, V] = np.linalg.svd(M, full_matrices=False)
maxd = self.l2distance(X[0, :], V, H[0, :])
imax = 0
for i in range(1, n):
newd = self.l2distance(X[i, :], V, H[0, :])
if (newd > maxd):
imax = i
maxd = newd
H[k, :] = X[imax, :]
return H
def project_simplex(self, x):
# This function computes the euclidean projection of vector 'x' onto the standard simplex.
n = len(x)
xord = -np.sort(-x)
sx = np.sum(x)
lam = (sx - 1.) / n
if (lam <= xord[n - 1]):
return (x - lam)
k = n - 1
flag = 0
while ((flag == 0) and (k > 0)):
sx -= xord[k]
lam = (sx - 1.) / k
if ((xord[k] <= lam) and (lam <= xord[k - 1])):
flag = 1
k -= 1
return np.fmax(x - lam, 0)
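    # Worked example (illustrative; call on an NMF instance, e.g. `nmf`):
    #     nmf.project_simplex(np.array([0.5, 0.8, 0.3]))
    #     # -> array([0.3, 0.6, 0.1]); entries are non-negative and sum to 1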
def project_principal(self, X, r):
# This function computes the rank 'r' pca estimate of columns of 'X'.
U, s, V = np.linalg.svd(X)
V = V[0:r, :]
U = U[:, 0:r]
s = s[0:r]
proj_X = np.dot(U, np.dot(np.diag(s), V))
return proj_X
def prune_convex(self, X):
# This function output the rows of 'X' which do not lie on the convex hull of the other rows.
n = X.shape[0]
indices = []
d = X.shape[1]
pruned_X = np.empty((0, d), int)
for i in range(0, n - 1):
print(i)
c = np.zeros(n - 1)
AEQ = np.delete(X, i, 0)
AEQ = np.transpose(AEQ)
AEQ = np.vstack([AEQ, np.ones((1, n - 1))])
BEQ = np.concatenate((X[i, :], [1]), 0)
res = linprog(c, A_ub=-1 * np.identity(n - 1), b_ub=np.zeros((n - 1, 1)), A_eq=AEQ, b_eq=np.transpose(BEQ),
options={"disp": True})
if (res.status == 2):
pruned_X = np.append(pruned_X, X[i, :].reshape(1, d), axis=0)
indices = np.append(indices, i)
return [indices.astype(int), pruned_X]
# project onto a line-segment
def proj_line_seg(self, X, x0):
# This function computes the projection of the point x0 onto the line segment between the points x1 and x2.
x1 = X[:, 0]
x2 = X[:, 1]
alpha = float(np.dot(np.transpose(x1 - x2), x0 - x2)) / (np.dot(np.transpose(x1 - x2), x1 - x2))
alpha = max(0, min(1, alpha))
y = alpha * x1 + (1 - alpha) * x2
theta = np.array([alpha, 1 - alpha])
return [theta, y]
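    # Worked example (illustrative; call on an NMF instance, e.g. `nmf`): project the
    # point (0.3, 5.0) onto the segment between columns x1 = (0, 0) and x2 = (1, 0):
    #     X = np.array([[0.0, 1.0], [0.0, 0.0]])
    #     theta, y = nmf.proj_line_seg(X, np.array([0.3, 5.0]))
    #     # theta -> [0.7, 0.3], y -> (0.3, 0.0)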
# project onto a triangle
def proj_triangle(self, X, x0):
# This function computes the projection of the point x0 onto the triangle with corners specified with the rows of X.
d = len(x0)
XX = np.zeros((d, 2))
XX[:, 0] = X[:, 0] - X[:, 2]
XX[:, 1] = X[:, 1] - X[:, 2]
P = np.dot(np.linalg.inv(np.dot(np.transpose(XX), XX)), np.transpose(XX))
theta = np.append(np.dot(P, x0 - X[:, 2]), 1 - np.sum(np.dot(P, x0 - X[:, 2])))
y = np.dot(X, theta)
if ((any(theta < 0)) or (any(theta > 1)) or (np.sum(theta) != 1)):
d1 = np.linalg.norm(X[:, 0] - y)
d2 = np.linalg.norm(X[:, 1] - y)
d3 = np.linalg.norm(X[:, 2] - y)
theta4, y4 = self.proj_line_seg(X[:, [0, 1]], y)
d4 = np.linalg.norm(y - y4)
theta5, y5 = self.proj_line_seg(X[:, [0, 2]], y)
d5 = np.linalg.norm(y - y5)
theta6, y6 = self.proj_line_seg(X[:, [1, 2]], y)
d6 = np.linalg.norm(y - y6)
d = min(d1, d2, d3, d4, d5, d6)
if (d1 == d):
y = X[:, 0]
theta = np.array([1, 0, 0])
elif (d2 == d):
y = X[:, 1]
theta = np.array([0, 1, 0])
elif (d3 == d):
y = X[:, 2]
theta = np.array([0, 0, 1])
elif (d4 == d):
y = y4
theta = np.zeros(3)
theta[[0, 1]] = theta4
elif (d5 == d):
y = y5
theta = np.zeros(3)
theta[[0, 2]] = theta5
else:
y = y6
theta = np.zeros(3)
theta[[1, 2]] = theta6
return [theta, y]
# project onto a tetrahedron
def proj_tetrahedron(self, X, x0):
# This function computes the projection of the point x0 onto the tetrahedron with corners specified with the rows of X.
d = len(x0)
XX = np.zeros((d, 3))
XX[:, 0] = X[:, 0] - X[:, 3]
XX[:, 1] = X[:, 1] - X[:, 3]
XX[:, 2] = X[:, 2] - X[:, 3]
P = np.dot(np.linalg.inv(np.dot(np.transpose(XX), XX)), np.transpose(XX))
theta = np.append(np.dot(P, x0 - X[:, 3]), 1 - np.sum(np.dot(P, x0 - X[:, 3])))
y = np.dot(X, theta)
        if ((any(theta < 0)) or (any(theta > 1)) or (np.sum(theta) != 1)):
import numpy as np
import matplotlib.pyplot as plt
import accretion_code as ac
import file_tools as flt
from scipy.interpolate import interp1d
import dedalus.public as de
import file_tools as flt
def mag(x): return np.log10(np.abs(x)+1e-16)
import mpmath as mp
li2_obj = np.frompyfunc(lambda x: float(mp.polylog(2,x)),1,1)
li2 = lambda y: li2_obj(y).astype(float)
# stability diagrams
filename = 'regime-curves.h5'
curves = {}
for curve in flt.get_keys(filename):
curves[curve] = {'l':flt.load_data(filename,'l',group=curve)[0],
'g':flt.load_data(filename,'g',group=curve)[0]}
curve_splines = {curve: interp1d(curves[curve]['l'], curves[curve]['g']) for curve in curves}
fracbasis = de.Chebyshev('s',12,interval=(0,1))
fracs = fracbasis.grid()
c0 = curve_splines['equal-shock']
c1 = curve_splines['tangent-shock']
ls = np.linspace(0.2, 1.3, 20)
gs0 = c0(ls)
gs1 = c1(ls)
gs = gs0[:,None] + (gs1 - gs0)[:,None]*fracs[None,:]
# shock location and magnitude
dics = {}
ur0_rs = {}
for i in range(len(ls)):
for j in range(gs.shape[1]):
print(i,j)
li = ls[i]
gij = gs[i,j]
dics[i,j] = ac.stability(li,gij,out=False)
# growth rate calculation
i, j = 1,1
dic = dics[i, j]
λ1s = np.zeros(gs.shape)
λ2s = np.zeros(gs.shape)
avals = np.zeros(gs.shape)
for i in range(gs.shape[0]):
for j in range(gs.shape[1]):
l, g = ls[i], gs[i,j]
λ1s[i,j] = dics[i,j]['λ_s1']
λ2s[i,j] = dics[i,j]['λ_s2']
from scipy.interpolate import RectBivariateSpline
λ1_spline = RectBivariateSpline(ls, fracs, λ1s)
λ2_spline = RectBivariateSpline(ls, fracs, λ2s)
ls_high = np.linspace(.2,1.3,100)
fracs_high = np.linspace(.005,.995,100)
λ1s_high = λ1_spline(ls_high, fracs_high)
λ2s_high = λ2_spline(ls_high, fracs_high)
import matplotlib.colors as colors
frac = np.linspace(0, 1, gs.shape[1], endpoint=False)
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
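# Hedged usage sketch (illustrative, relies only on the definitions above):
#     sg = space_groups['P -1']
#     hkls, phases = sg.symmetryEquivalentMillerIndices(N.array([1, 2, 3]))
#     # hkls contains (1, 2, 3) and (-1, -2, -3); both phase factors are 1 for P -1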
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
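# Space group 31: P m n 21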
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
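# Space group 32: P b a 2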
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
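# Space group 33: P n a 21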
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
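# Space group 34: P n n 2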
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
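# Space group 35: C m m 2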
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
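# Space group 36: C m c 21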
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
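# Space group 37: C c c 2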
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
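# Space group 38: A m m 2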
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
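# Space group 39: A b m 2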
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
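# Space group 40: A m a 2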
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
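# Space group 41: A b a 2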
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
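# Space group 42: F m m 2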
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
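# Space group 43: F d d 2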
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
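# Space group 44: I m m 2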
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
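# Space group 45: I b a 2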
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
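# Space group 46: I m a 2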
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
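# Space group 47: P m m m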
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
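# Space group 48: P n n n :2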
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
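# Space group 49: P c c m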
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
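# Space group 50: P b a n :2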
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
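# Space group 51: P m m a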
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
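# Space group 52: P n n a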
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
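# Space group 53: P m n a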
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
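# Space group 54: P c c a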
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
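# Space group 55: P b a m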
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
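# Space group 56: P c c n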
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
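# Space group 57: P b c m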
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
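# Space group 58: P n n m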
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
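# Space group 59: P m m n :2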
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
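# Space group 60: P b c n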
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
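# Space group 61: P b c a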
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
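# Space group 62: P n m a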
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
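# Space group 63: C m c m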
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
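# Space group 64: C m c a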
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
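# Space group 65: C m m m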
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
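# Space group 66: C c c m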
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
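# Space group 67: C m m a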
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
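# Space group 68: C c c a :2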
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
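# Space group 69: F m m m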
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
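# Space group 70: F d d d :2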
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
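# Space group 71: I m m m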
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
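# Space group 72: I b a m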
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
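# Space group 73: I b c a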
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
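# Space group 74: I m m a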
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
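# Space group 75: P 4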
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
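# Illustrative sketch only -- an assumption about how these tables are meant
# to be read, not part of the generated data: each (rot, trans_num, trans_den)
# tuple is taken here as the symmetry operation
#     x' = rot . x + trans_num / trans_den
# acting on fractional coordinates.  A hypothetical helper:
def _apply_symmetry_op(rot, trans_num, trans_den, point):
    # Rotate the fractional point, then add the fractional translation.
    return N.dot(rot, point) + N.array(trans_num, 'd') / N.array(trans_den, 'd')
# e.g. _apply_symmetry_op(rot, trans_num, trans_den, N.array([0.1, 0.2, 0.3]))

# Space group 76: P 41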
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
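# Space group 77: P 42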
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
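# Space group 78: P 43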
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
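# Space group 79: I 4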
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
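# Space group 80: I 41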
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
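# Space group 81: P -4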
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
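# Space group 82: I -4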
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
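# Space group 83: P 4/m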
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
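# Space group 84: P 42/m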
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
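# Space group 85: P 4/n :2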
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
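# Space group 86: P 42/n :2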
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
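# Space group 87: I 4/m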
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
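# Space group 88: I 41/a :2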
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
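# Space group 89: P 4 2 2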
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
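# Space group 90: P 4 21 2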
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
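# Space group 91: P 41 2 2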
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
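# Space group 92: P 41 21 2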
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
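# Space group 93: P 42 2 2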
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
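# Space group 94: P 42 21 2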
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
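# Space group 95: P 43 2 2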
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg
space_groups['I -4 2 d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(123, 'P 4/m m m', transformations)
space_groups[123] = sg
space_groups['P 4/m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(124, 'P 4/m c c', transformations)
space_groups[124] = sg
space_groups['P 4/m c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
space_groups[125] = sg
space_groups['P 4/n b m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg
space_groups['P 4/n n c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg
space_groups['P 4/m b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg
space_groups['P 4/m n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg
space_groups['P 4/n m m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
import numpy as np
def length(mat):
return np.max(mat.shape)
def ismember(mat1, mat2):
return np.isin(mat1, mat2)
def incVAT(RV, C, I, RI, d, distance_previous_points):
I_old = I
C_old = C
new_point_index = np.max(I.shape)+1
new_point_location = np.max(I.shape)+1
for j in range(np.max(I.shape)):
value, index = np.min(distance_previous_points[0:j]), np.argmin(
distance_previous_points[0:j])
if value < d[j]:
new_point_location = j+1
break
else:
        index = np.argmin(distance_previous_points)
remaining_points = I[new_point_location:-1]
remaining_points_old_points_method = remaining_points
remaining_points_location_in_RV = np.max(RV.shape)
remaining_points_old_points_method_location_in_RV = remaining_points_location_in_RV
    included_old_points = np.array([])
    included_old_points_location_in_RV = np.array([])
pointer_last_point = new_point_location-1
d_remaining = d[new_point_location-1:-1]
C_remaining = C[new_point_location: -1]
I = np.array([I[0:new_point_location], new_point_index])
d = np.array([d[0:new_point_location-1],
np.min(distance_previous_points[0:new_point_location])])
RV_reordering = np.linspace(0, new_point_location)
C = np.array([C[0:new_point_location], index])
    method = np.array([])
for k in range(np.max(remaining_points.shape)):
min_dist_old_points = d_remaining[0]
closest_old_points = remaining_points_old_points_method[0]
closest_old_points_location_RV = remaining_points_location_in_RV[0]
        # position of I_old[C_remaining[0]] within I (MATLAB ismember's second output)
        closest_point_C_remaining_old_points = np.where(
            I == I_old[C_remaining[0]])[0]
dist_new_point = distance_previous_points[remaining_points_location_in_RV]
[min_dist_new_point, index] = np.min(
dist_new_point), np.argmin(dist_new_point)
closest_new_point_location_RV = remaining_points_location_in_RV[index]
closest_new_point = remaining_points[index]
closest_point_C_remaining_new_point = new_point_location
dist_included_old_points = RV[
included_old_points_location_in_RV, remaining_points_location_in_RV]
        if np.max(included_old_points_location_in_RV.shape) == 1:
            # MATLAB-style [val, idx] = min(...) made explicit with np.min/np.argmin
            value1, index1 = np.min(dist_included_old_points), np.argmin(
                dist_included_old_points)
            # position of the included old point within I (ismember's second output)
            closest_point_C_included_old_points = np.where(
                I == included_old_points)[0]
        else:
            value = np.min(dist_included_old_points, axis=0)
            index = np.argmin(dist_included_old_points, axis=0)
            value1, index1 = np.min(value), np.argmin(value)
            closest_point_C_included_old_points = np.where(
                I == included_old_points[index[index1]])[0]
min_dist_included_old_points = value1
closest_included_old_points_location_RV = remaining_points_location_in_RV[index1]
closest_included_old_points = remaining_points[index1]
if np.shape(min_dist_included_old_points) == (0, 0):
[min_dist_all, min_dist_method] = np.min(np.array([min_dist_old_points, min_dist_new_point])), np.argmin(
np.array([min_dist_old_points, min_dist_new_point]))
else:
[min_dist_all, min_dist_method] = np.min(np.array(
[min_dist_old_points, min_dist_new_point, min_dist_included_old_points])), np.argmin(np.array(
[min_dist_old_points, min_dist_new_point, min_dist_included_old_points]))
if min_dist_method == 1:
method = np.array([method, 1])
I = np.array([I, closest_old_points])
d = np.array([d, min_dist_old_points])
C = np.array([C, closest_point_C_remaining_old_points])
RV_reordering = np.array(
[RV_reordering, closest_old_points_location_RV])
remaining_points[remaining_points ==
closest_old_points] = np.empty()
remaining_points_old_points_method[remaining_points_old_points_method ==
closest_old_points] = np.empty()
remaining_points_old_points_method_location_in_RV[remaining_points_old_points_method_location_in_RV ==
closest_old_points_location_RV] = np.empty()
remaining_points_location_in_RV[remaining_points_location_in_RV ==
closest_old_points_location_RV] = np.empty()
pointer_last_point = pointer_last_point+1
d_remaining[0] = np.empty()
C_remaining[0] = np.empty()
if np.max(remaining_points_old_points_method.shape) > 0:
while np.isin(remaining_points_old_points_method[0], I):
pointer_last_point = pointer_last_point+1
d_remaining[0] = np.empty()
C_remaining[0] = np.empty()
remaining_points_old_points_method[0] = np.empty()
remaining_points_old_points_method_location_in_RV[0] = np.empty(
)
if np.max(remaining_points_old_points_method.shape) == 0:
break
if min_dist_method == 2:
method = np.array([method, 2])
I = np.array([I, closest_old_points])
d = np.array([d, min_dist_old_points])
C = np.array([C, closest_point_C_remaining_old_points])
if closest_new_point == remaining_points[0]:
if length(remaining_points_old_points_method) > 0:
while ismember(remaining_points_old_points_method[0], I):
pointer_last_point = pointer_last_point+1
d_remaining[0] = np.empty()
C_remaining[0] = np.empty()
included_old_points[included_old_points ==
remaining_points_old_points_method[0]] = np.empty()
included_old_points_location_in_RV[included_old_points_location_in_RV ==
remaining_points_old_points_method_location_in_RV[0]] = np.empty()
remaining_points_old_points_method[0] = np.empty()
remaining_points_old_points_method_location_in_RV[0] = np.empty(
)
if length(remaining_points_old_points_method) == 0:
break
else:
included_old_points = np.array(
[included_old_points, closest_new_point])
included_old_points_location_in_RV = np.array(
[included_old_points_location_in_RV, closest_new_point_location_RV])
RV_reordering = np.array(
[RV_reordering, closest_new_point_location_RV])
remaining_points[remaining_points ==
closest_new_point] = np.empty()
remaining_points_location_in_RV[remaining_points_location_in_RV ==
closest_new_point_location_RV] = np.empty()
if min_dist_method == 3:
method = np.array([method, 3])
I = np.array([I, closest_old_points])
d = np.array([d, min_dist_old_points])
C = np.array([C, closest_point_C_remaining_old_points])
if length(remaining_points_old_points_method) > 0:
while ismember(remaining_points_old_points_method[0], I):
pointer_last_point = pointer_last_point+1
d_remaining[0] = np.empty()
C_remaining[0] = np.empty()
included_old_points[included_old_points ==
remaining_points_old_points_method[0]] = np.empty()
included_old_points_location_in_RV[included_old_points_location_in_RV ==
                                               remaining_points_old_points_method_location_in_RV[0]] = np.empty()
import warnings
import numpy as np
import xsimlab as xs
@xs.process
class BorderBoundary:
"""Sets boundary conditions at grid borders.
Borders are defined in the following order:
left, right, top, bottom
Border status can be one of:
- "core" (open boundary)
- "fixed_value" (closed boundary)
- "looped" (periodic boundary)
"fixed_value" must be set for at least one border. This is the minimal
constraint in order to make the numerical model solvable.
"looped" must be symmetric, i.e., defined for (left, right)
or (top, bottom).
Note that currently if "core" is set for two opposed borders these
will have periodic conditions (this comes from a current limitation in
fastscapelib-fortran which will be solved in a next release).
"""
status = xs.variable(
dims=[(), 'border'],
default="fixed_value",
description='node status at borders',
static=True
)
border = xs.index(
dims='border', description='4-border boundaries coordinate'
)
border_status = xs.variable(
dims='border',
intent='out',
description='node status at the 4-border boundaries'
)
ibc = xs.variable(
intent='out',
description='boundary code used by fastscapelib-fortran'
)
@status.validator
def _check_status(self, attribute, value):
if not np.isscalar(value) and len(value) != 4:
raise ValueError(
"Border status should be defined for all borders "
f"(left, right, top, bottom), found {value}"
)
valid = ["fixed_value", "core", "looped"]
bs = list(np.broadcast_to(value, 4))
for s in bs:
if s not in valid:
raise ValueError(f"Invalid border status {s!r}, must be one of {valid}")
if "fixed_value" not in bs:
raise ValueError(
f"There must be at least one border with status 'fixed_value', found {bs}"
)
def invalid_looped(s):
return bool(s[0] == "looped") ^ bool(s[1] == "looped")
if invalid_looped(bs[:2]) or invalid_looped(bs[2:]):
raise ValueError(f"Periodic boundary conditions must be symmetric, found {bs}")
def initialize(self):
self.border = np.array(['left', 'right', 'top', 'bottom'])
        bstatus = np.array(np.broadcast_to(self.status, 4))
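
# Minimal usage sketch for BorderBoundary (assumes xarray-simlab's attrs-based process
# machinery; the status lists below are illustrative, not from the original module):
#     BorderBoundary(status=["fixed_value", "fixed_value", "looped", "looped"])   # passes validation
#     BorderBoundary(status=["core", "core", "looped", "looped"])                 # ValueError: no "fixed_value"
#     BorderBoundary(status=["fixed_value", "looped", "core", "core"])            # ValueError: asymmetric "looped"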
# FUNCTIONS FOR MODEL EVALUATION.
import matplotlib.pyplot as plt
import numpy as np
from sklearn.utils import resample
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.metrics import mean_squared_error, accuracy_score
from sklearn.preprocessing import MinMaxScaler
################# MODEL EVALUATION WITH TRAINING/VALIDATION/TEST
# Function that computes the score of a given model on the training set, validation set and test set.
# It takes the model and the dataset (X,y) as input. You can specify whether the features in X should be scaled with MinMaxScaler. You can also specify
# the test set size, the random state used to split into training and test, and the number of folds for cross validation. Finally, you can specify
# whether the task is regression or classification.
# X has shape (n_instances,n_features) ; y has shape (n_instances,).
def compute_train_val_test(model ,X ,y ,scale=False ,test_size=0.2 ,random_state=123, cv=5 ,regr=True):
scoring=""
if regr:
scoring="neg_mean_squared_error"
else:
scoring="accuracy"
    if(scale): # Scale the features in X
scaler = MinMaxScaler()
scaler.fit(X)
X = scaler.transform(X)
    # Split into training and test sets.
X_train_80, X_test, y_train_80, y_test = train_test_split(X, y, test_size=test_size, random_state=random_state)
# Cross validation
scores = cross_val_score(model, X_train_80, y_train_80, cv=cv, scoring=scoring)
    val_acc = scores.mean() # score on the validation set
if regr:
val_acc = -val_acc
    model.fit(X_train_80,y_train_80) # Fit on the whole training set.
    # Compute the score on the training and test sets.
train_acc=0
test_acc=0
if regr:
train_acc = mean_squared_error(y_true=y_train_80, y_pred=model.predict(X_train_80))
test_acc = mean_squared_error(y_true=y_test, y_pred=model.predict(X_test))
else:
train_acc = accuracy_score(y_true=y_train_80, y_pred=model.predict(X_train_80))
test_acc = accuracy_score(y_true=y_test, y_pred=model.predict(X_test))
    return train_acc, val_acc, test_acc # Return a triple: score on training, validation, test.
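
# Example usage of compute_train_val_test (a sketch; assumes scikit-learn is available and
# X, y hold an already-loaded dataset -- both are placeholders here):
#     from sklearn.linear_model import Ridge
#     train_mse, val_mse, test_mse = compute_train_val_test(Ridge(alpha=1.0), X, y,
#                                                           scale=True, regr=True)
#     # with regr=True the three values are MSEs; with regr=False they are accuracies.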
# Function that, given a list of models, computes the training/validation/test score for each of them. It returns a list where, for each model, the
# training/validation/test score (triple) is computed, and it also returns the index of the best model: the best model is the one with the best
# score on the validation set.
# The function takes the list of models to evaluate and the full dataset (X,y). You can also pass:
# - scale, test_size, random_state, cv, regr --> as explained above
# - plotta --> tells the function whether to plot the model evaluation
# - plottaTrain --> specifies whether the plot should also show the score of the models on the training set
# - plottaTest --> specifies whether the plot should also show the score of the models on the test set
# - xvalues --> specifies the values to put on the x axis
# - xlabel --> specifies the label to put on the x axis
# - title --> specifies the title of the plot
def model_selection_TrainValTest(model_list, X, y, scale=False, test_size=0.2, random_state=123, cv=5, regr=True, plotta=False, plottaTrain=False,
                                 plottaTest=False, xvalues=None, xlabel="Complexity", title="Model evaluation with Training/Validation/Test"):
    if(scale): # Scale the features in X, if requested.
scaler = MinMaxScaler()
scaler.fit(X)
X = scaler.transform(X)
    # List trainValTest_list: for each model it will contain the triple of training/validation/test scores.
    trainValTest_list = []
    # Compute the scores for each model.
for model in model_list:
trainValTest_list.append(list(compute_train_val_test(model ,X, y, False, test_size, random_state, cv, regr=regr)))
    trainValTest_list = np.array(trainValTest_list) # as a numpy array
    if(plotta): # Draw the plot
        if(xvalues is None): # Default values on the x axis
            xvalues = range(len(model_list))
        fig, ax = plt.subplots(figsize=(6,6))
        if plottaTrain: # Also draw the score on the training set.
            ax.plot(xvalues,trainValTest_list[:,0], 'o:', label='Train')
        ax.plot(xvalues,trainValTest_list[:,1], 'o:', label='Validation') # Validation score
        if plottaTest: # Also draw the score on the test set.
ax.plot(xvalues,trainValTest_list[:,2], 'o:', label='Test')
ax.set_xlabel(xlabel)
ax.set_title(title)
ax.grid()
ax.legend()
    # Return a pair: the list of train/val/test scores for each model ; the index of the model with the best validation score.
    if regr: # regression
        return trainValTest_list, np.argmin(trainValTest_list,axis=0)[1]
    return trainValTest_list, np.argmax(trainValTest_list,axis=0)[1]
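
# Example usage of model_selection_TrainValTest (a sketch; the model list and data are placeholders):
#     from sklearn.tree import DecisionTreeRegressor
#     models = [DecisionTreeRegressor(max_depth=d) for d in range(1, 11)]
#     scores, best = model_selection_TrainValTest(models, X, y, regr=True, plotta=True,
#                                                 xvalues=range(1, 11), xlabel="max_depth")
#     best_model = models[best]   # model with the best validation score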
import torch
import torch.nn as nn
import numpy as np
import sys, os
import librosa
import argparse
sys.path.append("/workspace/WPBERT/Integer/WPBERT_REP_WP_Modified")
from WPBERT import CharBertForMaskedLM
from CharBERT.modeling.configuration_bert import BertConfig
#from modify_audio import embed
from modify_audio_res import embed
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--pretrain_path", default=None, type=str, required=True, \
help="The path of trained Language Model")
parser.add_argument("--model_name", default=None, type=str, required=True, \
help = "The name of model that will be included in write path")
    parser.add_argument("--hidden", default=None, type=int, required=True,\
        help="The index of the hidden layer you want to get representations from")
    parser.add_argument("--repr", default=None, type=str, required=True, \
        help="The representation type you want to get. Should be one of seq, seqchar, char")
parser.add_argument("--gpu", default=0, type=int, \
help="The gpu number you want to use")
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"]=f"{args.gpu}"
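    # Example invocation (the script name and checkpoint path below are placeholders):
    #   python extract_semantic_repr.py --pretrain_path /path/to/wpbert_checkpoint \
    #       --model_name wpbert --hidden 8 --repr seqchar --gpu 0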
#datapath = '/data/babymind/eval_data/semantic/dev'
datapath = '/data/babymind/eval_data/semantic/test'
pretrain_path = args.pretrain_path
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
embedding = embed(device = device)
    config = BertConfig(max_position_embeddings=512, type_vocab_size=1, vocab_size=1028, \
        num_hidden_layers=8, num_attention_heads=8, hidden_size=512, intermediate_size=2048, output_hidden_states = True)
model = CharBertForMaskedLM.from_pretrained(pretrained_model_name_or_path = pretrain_path, config=config)
model.to(device)
model.eval()
for (path, dirs, files) in os.walk(datapath):
for file_name in files:
if file_name.find('.wav')!=-1:
file_path = os.path.join(path, file_name)
audio, sr = librosa.load(file_path, None)
input_embed, phone_embed, start_ids, end_ids = embedding.forward(audio)
input_embed = input_embed.unsqueeze(0)
phone_embed = phone_embed.unsqueeze(0)
start_ids = start_ids.unsqueeze(0)
end_ids = end_ids.unsqueeze(0)
with torch.no_grad():
outputs = model(inputs_embeds = input_embed, char_input_ids = phone_embed,\
start_ids = start_ids, end_ids = end_ids, masked_lm_labels=None)
#sequence hidden state: outputs[5][1-9]
#char hidden state: outputs [6][1-9]
hidden = args.hidden
if args.repr=="seq":
sequence_repr = outputs[5][hidden].squeeze(dim=0)
elif args.repr=="char":
sequence_repr = outputs[6][hidden].squeeze(dim=0)
elif args.repr=="seqchar":
sequence_repr = torch.cat([outputs[5][hidden].squeeze(dim=0),outputs[6][hidden].squeeze(dim=0)], dim=1)
dir_path = path.replace("eval_data", f"{args.model_name}_semantic/hidden{hidden}/{args.repr}")
if not os.path.exists(dir_path):
os.makedirs(dir_path)
new_path = file_path.replace(".wav", ".txt")
new_path = new_path.replace("eval_data", f"{args.model_name}_semantic/hidden{hidden}/{args.repr}")
sequence_repr = sequence_repr.cpu().numpy()
if sequence_repr.shape[0]==1:
sequence_repr = np.append(sequence_repr, sequence_repr, axis = 0)
                np.savetxt(new_path, sequence_repr)
#######################################################################################################################
# Project QSRCNN: Quantized Speech Reconstruction using Convolutional Neural Network V1.0
# =====================================================================================================================
# DataPrepare.py: Data prepare and load data
#
#
# =====================================================================================================================
# Technische Universität Braunschweig, IfN
# Author: <NAME>.
# Date: 20.05.2017
#######################################################################################################################
import os
import time
import h5py as h5
import numpy as np
import scipy.io as sio
from numpy import random
# -------------------------------------------------------------------------------
# 1. load data
# -------------------------------------------------------------------------------
def load_train_data(train_inputs, train_targets, vali_inputs, vali_targets):
print('> +++++++++++++++++++++++++++++++++++++++++++++++++++++ ')
print('> Loading data ')
start = time.time()
# ---------------------------------------------------------
# 1. Load Input Data for Training
# ---------------------------------------------------------
mat_input = train_inputs
mat_input = os.path.normcase(mat_input)
print('> 1. Loading Training Input: ' + mat_input + '...')
x_train_noisy = sio.loadmat(mat_input)
x_train_noisy = x_train_noisy['inputSetNorm']
x_train_noisy = np.array(x_train_noisy)
# ---------------------------------------------------------
# 2. Load Target Data for training
# ---------------------------------------------------------
mat_target = train_targets
mat_target = os.path.normcase(mat_target)
print('> 2. Loading Training Target: ' + mat_target + '...')
x_train = sio.loadmat(mat_target)
x_train = x_train['targetSet']
x_train = np.array(x_train)
# x_train = target_min_max_scaler.fit_transform(x_train)
# ---------------------------------------------------------
# 3. Load Input Data for Validation
# ---------------------------------------------------------
mat_input_vali = vali_inputs
mat_input_vali = os.path.normcase(mat_input_vali)
print('> 3. Loading Validation Input: ' + mat_input_vali + '...')
x_train_noisy_vali = sio.loadmat(mat_input_vali)
x_train_noisy_vali = x_train_noisy_vali['inputSetNorm']
x_train_noisy_vali = np.array(x_train_noisy_vali)
# ---------------------------------------------------------
# 4. Load Target Data for Validation
# ---------------------------------------------------------
mat_target_vali = vali_targets
mat_target_vali = os.path.normcase(mat_target_vali)
print('> 4. Loading Validation Target: ' + mat_target_vali + '...')
x_train_vali = sio.loadmat(mat_target_vali)
x_train_vali = x_train_vali['targetSet']
    x_train_vali = np.array(x_train_vali)
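
# Usage sketch for load_train_data (the .mat file names are placeholders; the function is expected
# to return the four arrays loaded above once the remaining loading steps complete):
#     x_noisy, x_clean, x_noisy_val, x_clean_val = load_train_data(
#         "Train_inputSetNorm.mat", "Train_targetSet.mat",
#         "Vali_inputSetNorm.mat", "Vali_targetSet.mat")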
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import random
import math
import os
import time
import json
# from utils.VLSW import pad_all_cases
from VLSW import pad_all_cases
# set the random seeds for reproducability
SEED = 1234
random.seed(SEED)
def preprocess_df(df):
""" The training and testing data are manually selected.
:param df: dataframe with raw data
:return:
"""
df.set_index('date', inplace=True)
pm25 = df[['pm2.5']]
# Standlization, use StandardScaler
scaler_x = StandardScaler()
scaler_x.fit(pm25['pm2.5'].values.reshape(-1, 1))
pm25['pm2.5'] = scaler_x.transform(pm25['pm2.5'].values.reshape(-1, 1))
df_train = pm25.loc['2/01/2010 0:00':'31/12/2013 23:00'].copy()
df_test = pm25.loc['1/01/2014 0:00':'31/12/2014 23:00'].copy()
return df_train, df_test, scaler_x
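
# Usage sketch (mirrors the __main__ block below; the csv path is the one used in this script):
#     df = pd.read_csv('data/simplified_PM25.csv', dayfirst=True)
#     df_train, df_test, scaler_x = preprocess_df(df)
#     # df_train covers 2010-2013, df_test covers 2014, and 'pm2.5' is standardised via scaler_x.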
def train_val_test_generate(dataframe, model_params):
'''
:param dataframe: processed dataframe
:param model_params: for input dim
:return: train_x, train_y, test_x, test_y with the same length (by padding zero)
'''
train_val_test_x, train_val_test_y, len_x_samples, len_before_x_samples = pad_all_cases(
dataframe, dataframe['pm2.5'].values, model_params,
model_params['min_before'], model_params['max_before'],
model_params['min_after'], model_params['max_after'],
model_params['output_length'])
train_val_test_y = np.expand_dims(train_val_test_y, axis=2)
return train_val_test_x, train_val_test_y, len_x_samples, len_before_x_samples
def train_test_split_SSIM(x, y, x_len, x_before_len, model_params, SEED):
'''
:param x: all x samples
:param y: all y samples
:param model_params: parameters
:param SEED: random SEED
:return: train set, test set
'''
# check and remove samples with NaN (just incase)
index_list = []
for index, (x_s, y_s, len_s,
len_before_s) in enumerate(zip(x, y, x_len, x_before_len)):
if (np.isnan(x_s).any()) or (np.isnan(y_s).any()):
index_list.append(index)
x = np.delete(x, index_list, axis=0)
y = np.delete(y, index_list, axis=0)
x_len = np.delete(x_len, index_list, axis=0)
x_before_len = np.delete(x_before_len, index_list, axis=0)
x_train, x_test, y_train, y_test = train_test_split(x,
y,
test_size=None,
random_state=SEED,
shuffle=False)
x_train_len, x_test_len = train_test_split(x_len,
test_size=None,
random_state=SEED,
shuffle=False)
x_train_before_len, x_test_before_len = train_test_split(x_before_len,
test_size=None,
random_state=SEED,
shuffle=False)
return x_train, y_train, x_train_len, x_train_before_len
def test_pm25_single_station():
train_sampling_params = {
'dim_in': 1,
'output_length': 5,
'min_before': 4,
'max_before': 4,
'min_after': 6,
'max_after': 6,
'file_path': '../data/simplified_PM25.csv'
}
test_sampling_params = {
'dim_in': 1,
'output_length': 5,
'min_before': 4,
'max_before': 4,
'min_after': 6,
'max_after': 6,
'file_path': '../data/simplified_PM25.csv'
}
filepath = 'data/simplified_PM25.csv'
df = pd.read_csv(filepath, dayfirst=True)
df_train, df_test, scaler_x = preprocess_df(df)
x_samples, y_samples, x_len, x_before_len = train_val_test_generate(
df_train, train_sampling_params)
print('X_samples:{}'.format(x_samples.shape))
print('y_samples:{}'.format(y_samples.shape))
x_train, y_train, x_train_len, x_train_before_len = train_test_split_SSIM(
x_samples, y_samples, x_len, x_before_len, train_sampling_params, SEED)
print('x_train:{}'.format(x_train.shape))
print('y_train:{}'.format(y_train.shape))
print('x_train_len:{}'.format(x_train_len.shape))
print('x_train_before_len:{}'.format(x_train_before_len.shape))
x_train = x_train[:944700, :, :]
y_train = y_train[:944700, :, :]
x_train_len = x_train_len[:944700]
x_train_before_len = x_train_before_len[:944700]
x_samples, y_samples, x_len, x_before_len = train_val_test_generate(
df_test, test_sampling_params)
print('X_samples:{}'.format(x_samples.shape))
print('y_samples:{}'.format(y_samples.shape))
x_test, y_test, x_test_len, x_test_before_len = train_test_split_SSIM(
x_samples, y_samples, x_len, x_before_len, test_sampling_params, SEED)
print('x_test:{}'.format(x_test.shape))
print('y_test:{}'.format(y_test.shape))
print('x_test_len:{}'.format(x_test_len.shape))
print('x_test_before_len:{}'.format(x_test_before_len.shape))
x_test = x_test[:6500, :, :]
y_test = y_test[:6500, :, :]
x_test_len = x_test_len[:6500]
x_test_before_len = x_test_before_len[:6500]
return (x_train, y_train, x_train_len,
x_train_before_len), (x_test, y_test, x_test_len,
x_test_before_len)
def generate_delta(mask_array):
deltas = np.zeros(mask_array.shape)
timetable = np.arange(mask_array.shape[1])
# fill the delta vectors
for index, value in np.ndenumerate(mask_array):
# print(index,value)
# '''
# index[0] = row, agg
# index[1] = col, time
# '''
if index[1] == 0:
deltas[index[0], index[1]] = 0
        elif mask_array[index[0], index[1] - 1] == 0:
deltas[index[0], index[1]] = timetable[index[1]] - timetable[
index[1] - 1] + deltas[index[0], index[1] - 1]
else:
deltas[index[0], index[1]] = timetable[index[1]] - timetable[
index[1] - 1]
return deltas
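
# Worked example for generate_delta (illustrative input): with a single series whose mask is
# [1, 0, 0, 1] over four hourly steps, the deltas accumulate across the masked gap:
#     generate_delta(np.array([[1., 0., 0., 1.]]))  ->  [[0., 1., 2., 3.]]
# i.e. by index 3 the last observed value (index 0) lies 3 steps back.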
def generate_masks(x_sample_array, first_split_loc, second_split_loc):
# split x samples
sample_list = np.split(x_sample_array, [first_split_loc, second_split_loc],
axis=1)
mask_before = np.ones(sample_list[0].shape)
mask_middle = np.zeros(sample_list[1].shape)
mask_after = np.ones(sample_list[2].shape)
mask_all = np.concatenate((mask_before, mask_middle, mask_after), axis=1)
return mask_all
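
# Worked example for generate_masks (illustrative input): for samples with 10 time steps whose
# middle block (columns 4..8) is the gap to be imputed,
#     generate_masks(np.zeros((1, 10)), 4, 9)  ->  [[1, 1, 1, 1, 0, 0, 0, 0, 0, 1]]
# ones mark observed positions, zeros mark the masked middle block.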
def generate_eval_mask(x_sample_array, first_split_loc, second_split_loc):
# split x samples
sample_list = np.split(x_sample_array, [first_split_loc, second_split_loc],
axis=1)
mask_before = np.zeros(sample_list[0].shape)
mask_middle = np.ones(sample_list[1].shape)
mask_after = np.zeros(sample_list[2].shape)
mask_all = np.concatenate((mask_before, mask_middle, mask_after), axis=1)
return mask_all
def generate_eval(x_sample_array, y_sample_array, first_split_loc,
second_split_loc):
# split x samples
sample_list = np.split(x_sample_array, [first_split_loc, second_split_loc],
axis=1)
value_list = np.concatenate(
(sample_list[0], y_sample_array, sample_list[2]), axis=1)
return value_list
def generate_dicts(eval_list, eval_mask_list, value_list, masks_list,
delta_list, forward_list, eval_list_bac, eval_mask_list_bac,
value_list_bac, masks_list_bac, delta_list_bac,
forward_list_bac, train_label):
size = value_list.shape[0]
total_samples = []
for i in range(size):
line_dict = dict.fromkeys(['forward', 'backward', 'label', 'is_train'])
temp_dict = dict.fromkeys(
['values', 'masks', 'deltas', 'forwards', 'evals', 'eval_masks'])
# forward
temp_dict['values'] = value_list[i].flatten().tolist()
temp_dict['masks'] = masks_list[i].flatten().tolist()
temp_dict['deltas'] = delta_list[i].flatten().tolist()
temp_dict['forwards'] = value_list[i].flatten().tolist()
temp_dict['evals'] = eval_list[i].flatten().tolist()
temp_dict['eval_masks'] = eval_mask_list[i].flatten().tolist()
line_dict['forward'] = [temp_dict]
        # backward (use a separate dict so the forward entry is not overwritten)
        temp_dict_bac = dict.fromkeys(
            ['values', 'masks', 'deltas', 'forwards', 'evals', 'eval_masks'])
        temp_dict_bac['values'] = value_list_bac[i].flatten().tolist()
        temp_dict_bac['masks'] = masks_list_bac[i].flatten().tolist()
        temp_dict_bac['deltas'] = delta_list_bac[i].flatten().tolist()
        temp_dict_bac['forwards'] = value_list_bac[i].flatten().tolist()
        temp_dict_bac['evals'] = eval_list_bac[i].flatten().tolist()
        temp_dict_bac['eval_masks'] = eval_mask_list_bac[i].flatten().tolist()
        line_dict['backward'] = [temp_dict_bac]
# label
line_dict['label'] = train_label
# train/test
line_dict['is_train'] = train_label
total_samples.append(line_dict)
return total_samples
# def write_to_json(pred_dict_list):
if __name__ == "__main__":
train_sampling_params = {
'dim_in': 1,
'output_length': 5,
'min_before': 5,
'max_before': 5,
'min_after': 5,
'max_after': 5,
'file_path': '../data/simplified_PM25.csv'
}
test_sampling_params = {
'dim_in': 1,
'output_length': 5,
'min_before': 5,
'max_before': 5,
'min_after': 5,
'max_after': 5,
'file_path': '../data/simplified_PM25.csv'
}
filepath = 'data/simplified_PM25.csv'
df = pd.read_csv(filepath, dayfirst=True)
df_train, df_test, scaler_x = preprocess_df(df)
print(df_train.head())
x_samples, y_samples, x_len, x_before_len = train_val_test_generate(
df_train, train_sampling_params)
print('X_samples:{}'.format(x_samples.shape))
print('y_samples:{}'.format(y_samples.shape))
x_train, y_train, x_train_len, x_train_before_len = train_test_split_SSIM(
x_samples, y_samples, x_len, x_before_len, train_sampling_params, SEED)
print('x_train:{}'.format(x_train.shape))
print('y_train:{}'.format(y_train.shape))
print('x_train_len:{}'.format(x_train_len.shape))
print('x_train_before_len:{}'.format(x_train_before_len.shape))
x_samples, y_samples, x_len, x_before_len = train_val_test_generate(
df_test, test_sampling_params)
print('X_samples:{}'.format(x_samples.shape))
print('y_samples:{}'.format(y_samples.shape))
x_test, y_test, x_test_len, x_test_before_len = train_test_split_SSIM(
x_samples, y_samples, x_len, x_before_len, test_sampling_params, SEED)
print('x_test:{}'.format(x_test.shape))
print('y_test:{}'.format(y_test.shape))
print('x_test_len:{}'.format(x_test_len.shape))
print('x_test_before_len:{}'.format(x_test_before_len.shape))
# forward dictionary:
# mask
mask_train = generate_masks(
x_train, train_sampling_params['min_before'],
train_sampling_params['min_before'] +
train_sampling_params['output_length'])
mask_test = generate_masks(
x_test, test_sampling_params['min_before'],
test_sampling_params['min_before'] +
test_sampling_params['output_length'])
# eval, before elimination
value_train = generate_eval(
x_train, y_train, train_sampling_params['min_before'],
train_sampling_params['min_before'] +
train_sampling_params['output_length'])
value_test = generate_eval(
x_test, y_test, test_sampling_params['min_before'],
test_sampling_params['min_before'] +
test_sampling_params['output_length'])
# eval mask list
# eval_masks_train = np.ones(mask_train.shape)
# eval_masks_test = np.ones(mask_test.shape)
eval_masks_train = generate_eval_mask(
x_train, train_sampling_params['min_before'],
train_sampling_params['min_before'] +
train_sampling_params['output_length'])
eval_masks_test = generate_eval_mask(
x_test, test_sampling_params['min_before'],
test_sampling_params['min_before'] +
test_sampling_params['output_length'])
# value list, after elimination
# x_train
# x_test
# generate deltas list
delta_train = generate_delta(mask_train)
delta_test = generate_delta(mask_test)
#-------------------------------------------#
# backward dictionary:
# backward the train/test first
x_train_backward = np.flip(x_train, axis=1)
y_train_backward = np.flip(y_train, axis=1)
    x_test_backward = np.flip(x_test, axis=1)
import numpy as np
from scipy.spatial.distance import cdist
class Segreg(object):
def __init__(self):
self.attributeMatrix = np.matrix([]) # attributes matrix full size - all columns
self.location = [] # x and y coordinates from tract centroid (2D lists)
self.pop = [] # population of each groups by tract (2D lists)
self.pop_sum = [] # total population of the tract (sum all groups)
self.locality = [] # population intensity by groups by tract
self.n_location = 0 # length of list (n lines) (attributeMatrix.shape[0])
self.n_group = 0 # number of groups (attributeMatrix.shape[1] - 4)
self.costMatrix = [] # scipy cdist distance matrix
self.tract_id = [] # tract ids in string format
def readAttributesFile(self, filepath):
"""
This function reads the csv file and populate the class's attributes. Data has to be exactly in the
following format or results will be wrong:
area id, x_coord, y_coord, attribute 1, attributes 2, attributes 3, attribute n...
:param filepath: path with file to be read
:return: attribute Matrix [n,n]
"""
raw_data = np.genfromtxt(filepath, skip_header=1, delimiter=",", filling_values=0, dtype=None)
data = [list(item)[1:] for item in raw_data]
self.attributeMatrix = np.asmatrix(data)
n = self.attributeMatrix.shape[1]
self.location = self.attributeMatrix[:, 0:2]
self.location = self.location.astype('float')
self.pop = self.attributeMatrix[:, 2:n].astype('int')
# self.pop[np.where(self.pop < 0)[0], np.where(self.pop < 0)[1]] = 0
self.n_group = n-2
self.n_location = self.attributeMatrix.shape[0]
self.pop_sum = np.sum(self.pop, axis=1)
self.tract_id = np.asarray([x[0] for x in raw_data]).astype(str)
self.tract_id = self.tract_id.reshape((self.n_location, 1))
return self.attributeMatrix
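
    # Usage sketch (the csv name is a placeholder; columns must follow the order documented above):
    #     seg = Segreg()
    #     seg.readAttributesFile("tracts_population.csv")
    #     seg.cal_localityMatrix(bandwidth=2000, weightmethod=1)
    #     diss = seg.cal_localDissimilarity()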
def getWeight(self, distance, bandwidth, weightmethod=1):
"""
This function computes the weights for neighborhood. Default value is Gaussian(1)
:param distance: distance in meters to be considered for weighting
:param bandwidth: bandwidth in meters selected to perform neighborhood
:param weightmethod: method to be used: 1-gussian , 2-bi square and empty-moving windows
:return: weight array for internal use
"""
distance = np.asarray(distance.T)
if weightmethod == 1:
weight = np.exp((-0.5) * (distance/bandwidth) * (distance/bandwidth))
elif weightmethod == 2:
weight = (1 - (distance/bandwidth)*(distance/bandwidth)) * (1 - (distance/bandwidth)*(distance/bandwidth))
sel = np.where(distance > bandwidth)
weight[sel[0]] = 0
elif weightmethod == 3:
weight = (1 + (distance * 0))
sel = np.where(distance > bandwidth)
weight[sel[0]] = 0
else:
raise Exception('Invalid weight method selected!')
return weight
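
    # Worked example for getWeight: with the Gaussian kernel (weightmethod=1) a neighbour exactly
    # one bandwidth away gets weight exp(-0.5) ~= 0.6065, and weights decay smoothly with distance:
    #     getWeight(np.matrix([0., 2500., 5000.]), bandwidth=5000, weightmethod=1)
    #     # -> approximately [1.0, 0.8825, 0.6065]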
def cal_timeMatrix(self, bandwidth, weightmethod, matrix):
"""
This function calculate the local population intensity for all groups based on a time matrix.
:param bandwidth: bandwidth for neighborhood in meters
:param weightmethod: 1 for gaussian, 2 for bi-square and empty for moving window
:param matrix: path/file for input time matrix
:return: 2d array like with population intensity for all groups
"""
n_local = self.location.shape[0]
n_subgroup = self.pop.shape[1]
locality_temp = np.empty([n_local, n_subgroup])
for index in range(0, n_local):
for index_sub in range(0, n_subgroup):
cost = matrix[index, :].reshape(1, n_local)
weight = self.getWeight(cost, bandwidth, weightmethod)
locality_temp[index, index_sub] = np.sum(weight * np.asarray(self.pop[:, index_sub])) / np.sum(weight)
self.locality = locality_temp
self.locality[np.where(self.locality < 0)[0], np.where(self.locality < 0)[1]] = 0
return locality_temp
def cal_localityMatrix(self, bandwidth=5000, weightmethod=1):
"""
This function calculate the local population intensity for all groups.
:param bandwidth: bandwidth for neighborhood in meters
:param weightmethod: 1 for gaussian, 2 for bi-square and empty for moving window
:return: 2d array like with population intensity for all groups
"""
n_local = self.location.shape[0]
n_subgroup = self.pop.shape[1]
locality_temp = np.empty([n_local, n_subgroup])
for index in range(0, n_local):
for index_sub in range(0, n_subgroup):
cost = cdist(self.location[index, :], self.location)
weight = self.getWeight(cost, bandwidth, weightmethod)
locality_temp[index, index_sub] = np.sum(weight * np.asarray(self.pop[:, index_sub]))/np.sum(weight)
self.locality = locality_temp
self.locality[np.where(self.locality < 0)[0], np.where(self.locality < 0)[1]] = 0
return locality_temp
def cal_localDissimilarity(self):
"""
Compute local dissimilarity for all groups.
:return: 1d array like with results for all groups, size of localities
"""
if len(self.locality) == 0:
lj = np.ravel(self.pop_sum)
tjm = np.asarray(self.pop) * 1.0 / lj[:, None]
tm = np.sum(self.pop, axis=0) * 1.0 / np.sum(self.pop)
index_i = np.sum(np.asarray(tm) * np.asarray(1 - tm))
pop_total = np.sum(self.pop)
local_diss = np.sum(1.0 * np.array(np.fabs(tjm - tm)) *
np.asarray(self.pop_sum).ravel()[:, None] / (2 * pop_total * index_i), axis=1)
else:
lj = np.asarray(np.sum(self.locality, axis=1))
tjm = self.locality * 1.0 / lj[:, None]
            tm = np.sum(self.pop, axis=0) * 1.0 / np.sum(self.pop)
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
from numpy.testing import (
run_module_suite, TestCase, assert_, assert_equal, assert_almost_equal,
assert_no_warnings, assert_raises, assert_array_equal, suppress_warnings
)
# Test data
_ndat = np.array([[0.6244, np.nan, 0.2692, 0.0116, np.nan, 0.1170],
[0.5351, -0.9403, np.nan, 0.2100, 0.4759, 0.2833],
[np.nan, np.nan, np.nan, 0.1042, np.nan, -0.5954],
[0.1610, np.nan, np.nan, 0.1859, 0.3146, np.nan]])
# Rows of _ndat with nans removed
_rdat = [np.array([0.6244, 0.2692, 0.0116, 0.1170]),
np.array([0.5351, -0.9403, 0.2100, 0.4759, 0.2833]),
np.array([0.1042, -0.5954]),
np.array([0.1610, 0.1859, 0.3146])]
# Rows of _ndat with nans converted to ones
_ndat_ones = np.array([[0.6244, 1.0, 0.2692, 0.0116, 1.0, 0.1170],
[0.5351, -0.9403, 1.0, 0.2100, 0.4759, 0.2833],
[1.0, 1.0, 1.0, 0.1042, 1.0, -0.5954],
[0.1610, 1.0, 1.0, 0.1859, 0.3146, 1.0]])
# Rows of _ndat with nans converted to zeros
_ndat_zeros = np.array([[0.6244, 0.0, 0.2692, 0.0116, 0.0, 0.1170],
[0.5351, -0.9403, 0.0, 0.2100, 0.4759, 0.2833],
[0.0, 0.0, 0.0, 0.1042, 0.0, -0.5954],
[0.1610, 0.0, 0.0, 0.1859, 0.3146, 0.0]])
class TestNanFunctions_MinMax(TestCase):
nanfuncs = [np.nanmin, np.nanmax]
stdfuncs = [np.min, np.max]
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
for f in self.nanfuncs:
f(ndat)
assert_equal(ndat, _ndat)
def test_keepdims(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for axis in [None, 0, 1]:
tgt = rf(mat, axis=axis, keepdims=True)
res = nf(mat, axis=axis, keepdims=True)
assert_(res.ndim == tgt.ndim)
def test_out(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
resout = np.zeros(3)
tgt = rf(mat, axis=1)
res = nf(mat, axis=1, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
def test_dtype_from_input(self):
codes = 'efdgFDG'
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for c in codes:
mat = np.eye(3, dtype=c)
tgt = rf(mat, axis=1).dtype.type
res = nf(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
tgt = rf(mat, axis=None).dtype.type
res = nf(mat, axis=None).dtype.type
assert_(res is tgt)
def test_result_values(self):
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
tgt = [rf(d) for d in _rdat]
res = nf(_ndat, axis=1)
assert_almost_equal(res, tgt)
def test_allnans(self):
mat = np.array([np.nan]*9).reshape(3, 3)
for f in self.nanfuncs:
for axis in [None, 0, 1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(mat, axis=axis)).all())
assert_(len(w) == 1, 'no warning raised')
assert_(issubclass(w[0].category, RuntimeWarning))
# Check scalars
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(np.nan)))
assert_(len(w) == 1, 'no warning raised')
assert_(issubclass(w[0].category, RuntimeWarning))
def test_masked(self):
mat = np.ma.fix_invalid(_ndat)
msk = mat._mask.copy()
for f in [np.nanmin]:
res = f(mat, axis=1)
tgt = f(_ndat, axis=1)
assert_equal(res, tgt)
assert_equal(mat._mask, msk)
assert_(not np.isinf(mat).any())
def test_scalar(self):
for f in self.nanfuncs:
assert_(f(0.) == 0.)
def test_matrices(self):
# Check that it works and that type and
# shape are preserved
mat = np.matrix(np.eye(3))
for f in self.nanfuncs:
res = f(mat, axis=0)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (1, 3))
res = f(mat, axis=1)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (3, 1))
res = f(mat)
assert_(np.isscalar(res))
# check that rows of nan are dealt with for subclasses (#4628)
mat[1] = np.nan
for f in self.nanfuncs:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(mat, axis=0)
assert_(isinstance(res, np.matrix))
assert_(not np.any(np.isnan(res)))
assert_(len(w) == 0)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(mat, axis=1)
assert_(isinstance(res, np.matrix))
assert_(np.isnan(res[1, 0]) and not np.isnan(res[0, 0])
and not np.isnan(res[2, 0]))
assert_(len(w) == 1, 'no warning raised')
assert_(issubclass(w[0].category, RuntimeWarning))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(mat)
assert_(np.isscalar(res))
assert_(res != np.nan)
assert_(len(w) == 0)
class TestNanFunctions_ArgminArgmax(TestCase):
nanfuncs = [np.nanargmin, np.nanargmax]
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
for f in self.nanfuncs:
f(ndat)
assert_equal(ndat, _ndat)
def test_result_values(self):
for f, fcmp in zip(self.nanfuncs, [np.greater, np.less]):
for row in _ndat:
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered in")
ind = f(row)
val = row[ind]
# comparing with NaN is tricky as the result
# is always false except for NaN != NaN
assert_(not np.isnan(val))
assert_(not fcmp(val, row).any())
assert_(not np.equal(val, row[:ind]).any())
def test_allnans(self):
mat = np.array([np.nan]*9).reshape(3, 3)
for f in self.nanfuncs:
for axis in [None, 0, 1]:
assert_raises(ValueError, f, mat, axis=axis)
assert_raises(ValueError, f, np.nan)
def test_empty(self):
mat = np.zeros((0, 3))
for f in self.nanfuncs:
for axis in [0, None]:
assert_raises(ValueError, f, mat, axis=axis)
for axis in [1]:
res = f(mat, axis=axis)
assert_equal(res, np.zeros(0))
def test_scalar(self):
for f in self.nanfuncs:
assert_(f(0.) == 0.)
def test_matrices(self):
# Check that it works and that type and
# shape are preserved
mat = np.matrix(np.eye(3))
for f in self.nanfuncs:
res = f(mat, axis=0)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (1, 3))
res = f(mat, axis=1)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (3, 1))
res = f(mat)
assert_(np.isscalar(res))
class TestNanFunctions_IntTypes(TestCase):
int_types = (np.int8, np.int16, np.int32, np.int64, np.uint8,
np.uint16, np.uint32, np.uint64)
mat = np.array([127, 39, 93, 87, 46])
def integer_arrays(self):
for dtype in self.int_types:
yield self.mat.astype(dtype)
def test_nanmin(self):
tgt = np.min(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanmin(mat), tgt)
def test_nanmax(self):
tgt = np.max(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanmax(mat), tgt)
def test_nanargmin(self):
tgt = | np.argmin(self.mat) | numpy.argmin |
"""pyNRC - Python ETC and Simulator for JWST NIRCam"""
# Import libraries
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from astropy.table import Table
from astropy.io import ascii
from webbpsf_ext.webbpsf_ext_core import NIRCam_ext
from .nrc_utils import *
from .detops import det_timing, multiaccum, nrc_header
from webbpsf_ext.webbpsf_ext_core import _check_list
from tqdm.auto import trange, tqdm
import pysiaf
from pysiaf import rotations
from . import conf
from .logging_utils import setup_logging
import logging
_log = logging.getLogger('pynrc')
__epsilon = np.finfo(float).eps
class DetectorOps(det_timing):
"""
Class to hold detector operations information. Includes SCA attributes such as
detector names and IDs as well as :class:`multiaccum` class for ramp settings.
Parameters
----------------
detector : int, str
NIRCam detector ID (481-490) or SCA ID (A1-B5).
wind_mode : str
Window mode type 'FULL', 'STRIPE', 'WINDOW'.
xpix : int
Size of window in x-pixels for frame time calculation.
ypix : int
Size of window in y-pixels for frame time calculation.
x0 : int
Lower-left x-coord position of detector window.
y0 : int
Lower-left y-coord position of detector window.
nff : int
Number of fast row resets.
Keyword Args
------------
read_mode : str
NIRCam Ramp Readout mode such as 'RAPID', 'BRIGHT1', etc.
nint : int
Number of integrations (ramps).
ngroup : int
Number of groups in a integration.
nf : int
Number of frames per group.
nd1 : int
Number of drop frame after reset (before first group read).
nd2 : int
Number of drop frames within a group (ie., groupgap).
nd3 : int
Number of drop frames after final read frame in ramp.
Examples
--------
Use kwargs functionality to pass keywords to the multiaccum class.
Send via a dictionary of keywords and values:
>>> kwargs = {'read_mode':'RAPID', 'nint':5, 'ngroup':10}
>>> d = DetectorOps(**kwargs)
Set the keywords directly:
>>> d = DetectorOps(read_mode='RAPID', nint=5, ngroup=10)
"""
def __init__(self, detector=481, wind_mode='FULL', xpix=2048, ypix=2048,
x0=0, y0=0, nff=None, **kwargs):
super().__init__(wind_mode=wind_mode, xpix=xpix, ypix=ypix,
x0=x0, y0=y0, mode='JWST', nff=nff, **kwargs)
# Typical values for SW/LW detectors that get saved based on SCA ID.
# After setting the SCA ID, these various parameters can be updated,
# however they will be reset whenever the SCA ID is modified.
# - Pixel Scales in arcsec/pix
# - Well saturation level in e-
# - Typical dark current values in e-/sec (ISIM CV3)
# - Read Noise in e-
# - IPC and PPC in %
# - p_excess: Parameters that describe the excess variance observed in
# effective noise plots.
self._properties_SW = {'pixel_scale':pixscale_SW, 'dark_current':0.002, 'read_noise':11.5,
'IPC':0.54, 'PPC':0.09, 'p_excess':(1.0,5.0), 'ktc':37.6,
'well_level':105e3, 'well_level_old':81e3}
self._properties_LW = {'pixel_scale':pixscale_LW, 'dark_current':0.034, 'read_noise':10.0,
'IPC':0.60, 'PPC':0.19, 'p_excess':(1.5,10.0), 'ktc':36.8,
'well_level':83e3, 'well_level_old':75e3}
# Automatically set the pixel scale based on detector selection
self.auto_pixscale = True
self._gain_list = {481:2.07, 482:2.01, 483:2.16, 484:2.01, 485:1.83,
486:2.00, 487:2.42, 488:1.93, 489:2.30, 490:1.85}
self._scaids = {481:'A1', 482:'A2', 483:'A3', 484:'A4', 485:'A5',
486:'B1', 487:'B2', 488:'B3', 489:'B4', 490:'B5'}
# Allow user to specify name using either SCA ID or Detector ID (ie., 481 or 'A1')
try: # First, attempt to set SCA ID
self.scaid = detector
except ValueError:
try: # If that doesn't work, then try to set Detector ID
self.detid = get_detname(detector)[3:]
except ValueError: # If neither work, raise ValueError exception
raise ValueError("Invalid detector: {0} \n\tValid names are: {1},\n\t{2}" \
.format(detector, ', '.join(self.detid_list), \
', '.join(str(e) for e in self.scaid_list)))
# For full arrays number of resets in first integration is 0
# self.wind_mode = wind_mode
_log.info('Initializing SCA {}/{}'.format(self.scaid,self.detid))
@property
def wind_mode(self):
"""Window mode attribute"""
return self._wind_mode
@wind_mode.setter
def wind_mode(self, value):
"""Set Window mode attribute"""
self._wind_mode = value
self.multiaccum.nr1 = 0 if value=='FULL' else 1
@property
def scaid(self):
"""Selected SCA ID from detectors in the `scaid_list` attribute. 481, 482, etc."""
return self._scaid
@property
def detid(self):
"""Selected Detector ID from detectors in the `detid_list` attribute. A1, A2, etc."""
return self._detid
@property
def detname(self):
"""Selected Detector ID from detectors in the `scaid_list` attribute. NRCA1, NRCA2, etc."""
return self._detname
# Used for setting the SCA ID then updating all the other detector properties
@scaid.setter
def scaid(self, value):
"""Set SCA ID (481, 482, ..., 489, 490). Automatically updates other relevant attributes."""
_check_list(value, self.scaid_list, var_name='scaid')
self._scaid = value
self._detid = self._scaids.get(self._scaid)
# Detector Name (as stored in FITS headers): NRCA1, NRCALONG, etc.
if self.channel=='LW': self._detname = 'NRC' + self.module + 'LONG'
else: self._detname = 'NRC' + self._detid
# Select various detector properties (pixel scale, dark current, read noise, etc)
# depending on LW or SW detector
dtemp = self._properties_LW if self.channel=='LW' else self._properties_SW
if self.auto_pixscale:
self.pixelscale = dtemp['pixel_scale']
self.ktc = dtemp['ktc']
self.dark_current = dtemp['dark_current']
self.read_noise = dtemp['read_noise']
self.IPC = dtemp['IPC']
self.PPC = dtemp['PPC']
self.p_excess = dtemp['p_excess']
self.well_level = dtemp['well_level']
self.gain = self._gain_list.get(self._scaid, 2.0)
# Similar to scaid.setter, except if detector ID is specified.
@detid.setter
def detid(self, value):
"""Set detector ID (A1, A2, ..., B4, B5). Automatically updates other relevant attributes."""
if 'NRC' in value:
value = value[3:]
_check_list(value, self.detid_list, var_name='detid')
# Switch dictionary keys and values, grab the corresponding SCA ID,
# and then call scaid.setter
newdict = {y:x for x,y in self._scaids.items()}
self.scaid = newdict.get(value)
@property
def scaid_list(self):
"""Allowed SCA IDs"""
return sorted(list(self._scaids.keys()))
@property
def detid_list(self):
"""Allowed Detector IDs"""
return sorted(list(self._scaids.values()))
@property
def module(self):
"""NIRCam modules A or B (inferred from detector ID)"""
return self._detid[0]
@property
def channel(self):
"""Detector channel 'SW' or 'LW' (inferred from detector ID)"""
return 'LW' if self.detid.endswith('5') else 'SW'
def xtalk(self, file_path=None):
"""Detector cross talk information"""
if file_path is None:
file = 'xtalk20150303g0.errorcut.txt'
file_path = os.path.join(conf.PYNRC_PATH, 'sim_params', file)
xt_coeffs = ascii.read(file_path, header_start=0)
ind = xt_coeffs['Det'] == self.detid
return xt_coeffs[ind]
def pixel_noise(self, fsrc=0.0, fzodi=0.0, fbg=0.0, rn=None, ktc=None, idark=None,
p_excess=None, ng=None, nf=None, verbose=False, **kwargs):
"""Noise values per pixel.
Return theoretical noise calculation for the specified MULTIACCUM exposure
in terms of e-/sec. This uses the pre-defined detector-specific noise
properties. Can specify flux of a source as well as background and
zodiacal light (in e-/sec/pix). After getting the noise per pixel per
ramp (integration), value(s) are divided by the sqrt(NINT) to return
the final noise.
Parameters
----------
fsrc : float or image
Flux of source in e-/sec/pix
fzodi : float or image
Flux of the zodiacal background in e-/sec/pix
fbg : float or image
Flux of telescope background in e-/sec/pix
idark : float or image
Option to specify dark current in e-/sec/pix.
rn : float
Option to specify Read Noise per pixel (e-).
ktc : float
Option to specify kTC noise (in e-). Only valid for single frame (n=1)
p_excess : array-like
Optional. An array or list of two elements that holds the parameters
describing the excess variance observed in effective noise plots.
By default these are both 0. For NIRCam detectors, recommended
values are [1.0,5.0] for SW and [1.5,10.0] for LW.
ng : None or int or image
Option to explicitly states number of groups. This is specifically
used to enable the ability of only calculating pixel noise for
unsaturated groups for each pixel. If a numpy array, then it should
be the same shape as `fsrc` image. By default will use `self.ngroup`.
verbose : bool
Print out results at the end.
Keyword Arguments
-----------------
ideal_Poisson : bool
If set to True, use total signal for noise estimate,
otherwise MULTIACCUM equation is used.
Notes
-----
fsrc, fzodi, and fbg are functionally the same as they are immediately summed.
They can also be single values or multiple elements (list, array, tuple, etc.).
If multiple inputs are arrays, make sure their array sizes match.
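Example
-------
A minimal usage sketch; the detector choice and flux values below are
illustrative assumptions, not recommended defaults:
>>> det = DetectorOps(detector=485, read_mode='BRIGHT1', ngroup=10, nint=5)
>>> noise = det.pixel_noise(fsrc=10.0, fzodi=0.5, fbg=0.1, verbose=True)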
"""
ma = self.multiaccum
if ng is None:
ng = ma.ngroup
if nf is None:
nf = ma.nf
if rn is None:
rn = self.read_noise
if ktc is None:
ktc = self.ktc
if p_excess is None:
p_excess = self.p_excess
if idark is None:
idark = self.dark_current
# Pixel noise per ramp (e-/sec/pix)
pn = pix_noise(ngroup=ng, nf=nf, nd2=ma.nd2, tf=self.time_frame,
rn=rn, ktc=ktc, p_excess=p_excess,
idark=idark, fsrc=fsrc, fzodi=fzodi, fbg=fbg, **kwargs)
# Divide by sqrt(Total Integrations)
final = pn / np.sqrt(ma.nint)
if verbose:
print('Noise (e-/sec/pix): {}'.format(final))
print('Total Noise (e-/pix): {}'.format(final*self.time_exp))
return final
@property
def fastaxis(self):
"""Fast readout direction in sci coords"""
# https://jwst-pipeline.readthedocs.io/en/latest/jwst/references_general/references_general.html#orientation-of-detector-image
# 481, 3, 5, 7, 9 have fastaxis equal -1
# Others have fastaxis equal +1
fastaxis = -1 if np.mod(self.scaid,2)==1 else +1
return fastaxis
@property
def slowaxis(self):
"""Slow readout direction in sci coords"""
# https://jwst-pipeline.readthedocs.io/en/latest/jwst/references_general/references_general.html#orientation-of-detector-image
# 481, 3, 5, 7, 9 have slowaxis equal +2
# Others have slowaxis equal -2
slowaxis = +2 if np.mod(self.scaid,2)==1 else -2
return slowaxis
def make_header(self, filter=None, pupil_mask=None, obs_time=None, **kwargs):
"""
Create a generic NIRCam FITS header.
Parameters
----------
filter :str
Name of filter element.
pupil_mask : str
Name of pupil element.
obs_time : datetime
Specifies when the observation was considered to be executed.
If not specified, then it will choose the current time.
This must be a datetime object:
>>> datetime.datetime(2016, 5, 9, 11, 57, 5, 796686)
"""
return nrc_header(self, filter=filter, pupil=pupil_mask, obs_time=obs_time, **kwargs)
class NIRCam(NIRCam_ext):
"""NIRCam base instrument class
Creates a NIRCam instrument class that holds all the information pertinent to
an observation with a given instrument setup. This class extends the NIRCam subclass
``webbpsf_ext.NIRCam_ext``, to generate PSF coefficients to calculate an arbitrary
PSF based on wavelength, field position, and WFE drift.
In addition to PSF generation, includes ability to estimate detector saturation
limits, sensitivities, and perform ramp optimizations.
Parameters
==========
filter : str
Name of input filter.
pupil_mask : str, None
Pupil elements such as grisms or lyot stops (default: None).
image_mask : str, None
Specify which coronagraphic occulter (default: None).
ND_acq : bool
Add in neutral density attenuation in throughput and PSF creation?
Used primarily for sensitivity and saturation calculations.
Not recommended for simulations (TBI).
detector : int or str
NRC[A-B][1-5] or 481-490
apname : str
Pass specific SIAF aperture name, which will update pupil mask, image mask,
and detector subarray information.
autogen_coeffs : bool
Automatically generate base PSF coefficients. Equivalent to performing
``self.gen_psf_coeff()``. Default: True
WFE drift and field-dependent coefficients should be run manually via
``gen_wfedrift_coeff``, ``gen_wfefield_coeff``, and ``gen_wfemask_coeff``.
Keyword Args
============
wind_mode : str
Window mode type 'FULL', 'STRIPE', 'WINDOW'.
xpix : int
Size of window in x-pixels for frame time calculation.
ypix : int
Size of window in y-pixels for frame time calculation.
x0 : int
Lower-left x-coord position of detector window.
y0 : int
Lower-left y-coord position of detector window.
read_mode : str
NIRCam Ramp Readout mode such as 'RAPID', 'BRIGHT1', etc.
nint : int
Number of integrations (ramps).
ngroup : int
Number of groups in a integration.
nf : int
Number of frames per group.
nd1 : int
Number of drop frame after reset (before first group read).
nd2 : int
Number of drop frames within a group (ie., groupgap).
nd3 : int
Number of drop frames after final read frame in ramp.
nr1 : int
Number of reset frames within first ramp.
nr2 : int
Number of reset frames for subsequent ramps.
PSF Keywords
============
fov_pix : int
Size of the PSF FoV in pixels (real SW or LW pixels).
The defaults depend on the type of observation.
Odd numbers place the PSF at the center of a pixel,
whereas an even number centers it on the "crosshairs."
oversample : int
Factor to oversample during WebbPSF calculations.
Default 2 for coronagraphy and 4 otherwise.
include_si_wfe : bool
Include SI WFE measurements? Default=True.
include_distortions : bool
If True, will include a distorted version of the PSF.
pupil : str
File name or HDUList specifying telescope entrance pupil.
Can also be an OTE_Linear_Model.
pupilopd : tuple or HDUList
Tuple (file, index) or filename or HDUList specifying OPD.
Can also be an OTE_Linear_Model.
wfe_drift : float
Wavefront error drift amplitude in nm.
offset_r : float
Radial offset from the center in arcsec.
offset_theta :float
Position angle for radial offset, in degrees CCW.
bar_offset : float
For wedge masks, option to set the PSF position across the bar.
jitter : str or None
Currently either 'gaussian' or None.
jitter_sigma : float
If ``jitter = 'gaussian'``, then this is the size of the blurring effect.
npsf : int
Number of wavelengths/PSFs to fit.
ndeg : int
Degree of polynomial fit.
nproc : int
Manual setting of number of processor cores to break up PSF calculation.
If set to None, this is determined based on the requested PSF size,
amount of available memory, and hardware processor cores. The automatic
calculation endeavors to leave a number of resources available to the
user so as to not crash the user's machine.
save : bool
Save the resulting PSF coefficients to a file? (default: True)
force : bool
Forces a recalculation of PSF even if saved PSF exists. (default: False)
quick : bool
Only perform a fit over the filter bandpass with a lower default polynomial degree fit.
(default: True)
use_legendre : bool
Fit with Legendre polynomials, an orthonormal basis set. (default: True)
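Examples
--------
A minimal instantiation sketch; the filter and ramp settings are
illustrative only:
>>> nrc = pynrc.NIRCam('F444W', read_mode='DEEP8', ngroup=5, nint=10)
>>> psf_hdul = nrc.calc_psf_from_coeff()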
"""
def __init__(self, filter=None, pupil_mask=None, image_mask=None,
ND_acq=False, detector=None, apname=None, autogen_coeffs=True, **kwargs):
if detector is not None:
detector = get_detname(detector)
# Available Filters
# Note: Certain narrowband filters reside in the pupil wheel and cannot be paired
# with pupil elements. This will be checked for later.
self._filters_sw = ['F070W', 'F090W', 'F115W', 'F150W', 'F150W2', 'F200W',
'F140M', 'F162M', 'F182M', 'F210M', 'F164N', 'F187N', 'F212N']
self._filters_lw = ['F277W', 'F322W2', 'F356W', 'F444W', 'F323N', 'F405N', 'F466N', 'F470N',
'F250M', 'F300M', 'F335M', 'F360M', 'F410M', 'F430M', 'F460M', 'F480M']
# Coronagraphic Masks
self._coron_masks = [None, 'MASK210R', 'MASK335R', 'MASK430R', 'MASKSWB', 'MASKLWB']
# self.offset_bar = offset_bar
# Pupil Wheel elements
self._lyot_masks = ['CIRCLYOT', 'WEDGELYOT']
# DHS in SW and Grisms in LW
self._dhs = ['DHS0', 'DHS60']
# Grism0/90 => GrismR/C
self._grism = ['GRISMR', 'GRISMC']
# Weak lens are only in SW pupil wheel (+4 in filter wheel)
self._weak_lens = ['WLP4', 'WLPM4', 'WLP8', 'WLM8', 'WLP12']
# Check alternate inputs
if pupil_mask is not None:
pupil_mask = pupil_mask.upper()
# If alternate Weak Lens values are specified
if 'WL' in pupil_mask:
wl_alt = {
'WEAK LENS +4': 'WLP4',
'WEAK LENS +8': 'WLP8',
'WEAK LENS -8': 'WLM8',
'WEAK LENS +12 (=4+8)': 'WLP12',
'WEAK LENS -4 (=4-8)': 'WLM4',
}
pupil_mask = wl_alt.get(pupil_mask, pupil_mask)
# Pair F200W throughput with WL+4
# The F212N2 throughput is then handled in read_filter() function
wl_list = ['WLP12', 'WLM4', 'WLP4']
if (pupil_mask in wl_list) and ((filter is None) or (filter!='F200W')):
filter = 'F200W'
# Check Grism alternate inputs
if 'GRISM0' in pupil_mask:
pupil_mask = 'GRISMR'
elif 'GRISM90' in pupil_mask:
pupil_mask = 'GRISMC'
super().__init__(filter=filter, pupil_mask=pupil_mask, image_mask=image_mask, **kwargs)
if apname is None:
if detector is not None:
self.detector = detector
self._ND_acq = ND_acq
self._validate_wheels()
self.update_detectors(**kwargs)
ap_name_rec = self.get_siaf_apname()
self.update_from_SIAF(ap_name_rec, pupil_mask=pupil_mask)
else:
self.update_from_SIAF(apname, pupil_mask=pupil_mask, **kwargs)
# Default to no jitter for coronagraphy
self.options['jitter'] = None if self.is_coron else 'gaussian'
# Generate PSF coefficients
if autogen_coeffs:
self.gen_psf_coeff(**kwargs)
# Background fov pix is only for coronagraphic masks
# Create a background reference class
self._fov_pix_bg = 33
self._fov_bg_match = False
# if autogen_coeffs:
self._update_bg_class(**kwargs)
# Check aperture info is consistent if not explicitly specified
ap_name_rec = self.get_siaf_apname()
if ((apname is None) and (ap_name_rec != self.aperturename) and
not (('FULL' in self.aperturename) and ('TAMASK' in self.aperturename))):
# Warning strings
out_str1 = f'Current aperture {self.aperturename} does not match recommendation ({ap_name_rec}).'
out_str2 = f'Perhaps try self.aperturename = self.get_siaf_apname()'
_log.info(out_str1)
_log.info(out_str2)
def _update_bg_class(self, fov_bg_match=None, **kwargs):
"""
If there is a coronagraphic spot or bar, then we may need to
generate another background PSF for sensitivity information.
It's easiest just to ALWAYS do a small footprint without the
coronagraphic mask and save the PSF coefficients.
WARNING: This assumes throughput of the coronagraphic substrate
for observations with a Lyot pupil mask.
Parameters
==========
fov_bg_match : bool or None
Determines whether or not to match bg FoV to sci FoV for
coronagraphic observations. If set to None, default to
`self._fov_bg_match` property. If a boolean value is
provided, then `self._fov_bg_match` is updated.
"""
try:
# Make sure we don't inadvertently delete base object
if self._nrc_bg is not self:
del self._nrc_bg
except AttributeError:
pass
# Update background PSF size if fov_bg_match is True
if fov_bg_match is not None:
self._fov_bg_match = fov_bg_match
self._fov_pix_bg = self.fov_pix if self._fov_bg_match else self._fov_pix_bg
if self._image_mask is None:
self._nrc_bg = self
else:
log_prev = conf.logging_level
setup_logging('WARN', verbose=False)
nrc_bg = NIRCam_ext(filter=self.filter, pupil_mask=self.pupil_mask,
fov_pix=self._fov_pix_bg, oversample=self._oversample)
# Generate coefficients
nrc_bg.gen_psf_coeff(**kwargs)
setup_logging(log_prev, verbose=False)
# Save as attribute
self._nrc_bg = nrc_bg
# Allowed values for filters, coronagraphic masks, and pupils
# @property
# def filter_list(self):
# """List of allowable filters."""
# return self._filters_sw + self._filters_lw
# @property
# def image_mask_list(self):
# """List of allowable coronagraphic mask values."""
# return self._coron_masks
# @property
# def pupil_mask_list(self):
# """List of allowable pupil mask values."""
# return ['CLEAR','FLAT'] + self._lyot_masks + self._grism + self._dhs + self._weak_lens
def plot_bandpass(self, ax=None, color=None, title=None, **kwargs):
"""
Plot the instrument bandpass on a selected axis.
Can pass various keywords to ``matplotlib.plot`` function.
Parameters
----------
ax : matplotlib.axes, optional
Axes on which to plot bandpass.
color :
Color of bandpass curve.
title : str
Update plot title.
Returns
-------
matplotlib.axes
Updated axes
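Example
-------
A short sketch; the filter and color choices are illustrative:
>>> nrc = pynrc.NIRCam('F335M')
>>> ax = nrc.plot_bandpass(color='C1')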
"""
if ax is None:
f, ax = plt.subplots(**kwargs)
color='C2' if color is None else color
bp = self.bandpass
w = bp.wave / 1e4; f = bp.throughput
ax.plot(w, f, color=color, label=bp.name+' Filter', **kwargs)
ax.set_xlabel(r'Wavelength ($\mathdefault{\mu m}$)')
ax.set_ylabel('Throughput')
if title is None:
title = bp.name + ' - Module ' + self.module
ax.set_title(title)
return ax
# Check consistencies
def _validate_wheels(self):
"""
Validation to make sure the selected filters and pupils are allowed to be in parallel.
"""
def do_warn(wstr):
_log.warning(wstr)
_log.warning('Proceed at your own risk!')
filter = self._filter
pupil_mask = self._pupil_mask
image_mask = self._image_mask
if self.channel=='long' or self.channel=='LW':
channel = 'LW'
else:
channel = 'SW'
if image_mask is None:
image_mask = ''
if pupil_mask is None:
pupil_mask = ''
# Weak lenses can only occur in SW modules
if ('WEAK LENS' in pupil_mask) and (channel=='LW'):
wstr = '{} in pupil is not valid with filter {}.'.format(pupil_mask,filter)
wstr = wstr + '\nWeak lens only in SW module.'
do_warn(wstr)
# DHS in SW modules
if ('DHS' in pupil_mask) and (channel=='LW'):
wstr = '{} in pupil is not valid with filter {}.'.format(pupil_mask,filter)
wstr = wstr + '\nDHS only in SW module.'
do_warn(wstr)
# DHS cannot be paired with F164N or F162M
flist = ['F164N', 'F162M']
if ('DHS' in pupil_mask) and (filter in flist):
wstr = 'Both {} and filter {} exist in same pupil wheel.'.format(pupil_mask,filter)
do_warn(wstr)
# Grisms in LW modules
if ('GRISM' in pupil_mask) and (channel=='SW'):
wstr = '{} in pupil is not valid with filter {}.'.format(pupil_mask,filter)
wstr = wstr + '\nGrisms only in LW module.'
do_warn(wstr)
# Grisms cannot be paired with any Narrowband filters
flist = ['F323N', 'F405N', 'F466N', 'F470N']
if ('GRISM' in pupil_mask) and (filter in flist):
wstr = 'Both {} and filter {} exist in same pupil wheel.'.format(pupil_mask,filter)
do_warn(wstr)
# MASK430R falls in SW SCA gap and cannot be seen by SW module
if ('MASK430R' in image_mask) and (channel=='SW'):
wstr = '{} mask is not visible in SW module (filter is {}).'.format(image_mask,filter)
do_warn(wstr)
# Need F200W paired with WEAK LENS +4
# The F212N2 filter is handled in the read_filter function
wl_list = ['WEAK LENS +12 (=4+8)', 'WEAK LENS -4 (=4-8)', 'WEAK LENS +4']
if (pupil_mask in wl_list) and (filter!='F200W'):
wstr = '{} is only valid with filter F200W.'.format(pupil_mask)
do_warn(wstr)
# Items in the same SW pupil wheel
sw2 = ['WEAK LENS +8', 'WEAK LENS -8', 'F162M', 'F164N', 'CIRCLYOT', 'WEDGELYOT']
if (filter in sw2) and (pupil_mask in sw2):
wstr = '{} and {} are both in the SW Pupil wheel.'.format(filter,pupil_mask)
do_warn(wstr)
# Items in the same LW pupil wheel
lw2 = ['F323N', 'F405N', 'F466N', 'F470N', 'CIRCLYOT', 'WEDGELYOT']
if (filter in lw2) and (pupil_mask in lw2):
wstr = '{} and {} are both in the LW Pupil wheel.'.format(filter,pupil_mask)
do_warn(wstr)
# ND_acq must have a LYOT stop, otherwise coronagraphic mask is not in FoV
if self.ND_acq and ('LYOT' not in pupil_mask):
wstr = 'CIRCLYOT or WEDGELYOT must be in pupil wheel if ND_acq=True.'
do_warn(wstr)
# ND_acq and coronagraphic mask are mutually exclusive
if self.ND_acq and (image_mask != ''):
wstr = 'If ND_acq is set, then mask must be None.'
do_warn(wstr)
def update_detectors(self, verbose=False, **kwargs):
""" Update detector operation parameters
Creates detector object based on :attr:`detector` attribute.
This function should be called any time a filter, pupil, mask, or
module is modified by the user.
If the user wishes to change any properties of the multiaccum ramp
or detector readout mode, pass those arguments through this function
rather than creating a whole new NIRCam() instance. For example:
>>> nrc = pynrc.NIRCam('F430M', ngroup=10, nint=5)
>>> nrc.update_detectors(ngroup=2, nint=10, wind_mode='STRIPE', ypix=64)
A dictionary of the keyword settings can be referenced in :attr:`det_info`.
This dictionary cannot be modified directly.
Parameters
----------
verbose : bool
Print out ramp and detector settings.
Keyword Args
------------
wind_mode : str
Window mode type 'FULL', 'STRIPE', 'WINDOW'.
xpix : int
Size of window in x-pixels for frame time calculation.
ypix : int
Size of window in y-pixels for frame time calculation.
x0 : int
Lower-left x-coord position of detector window.
y0 : int
Lower-left y-coord position of detector window.
read_mode : str
NIRCam Ramp Readout mode such as 'RAPID', 'BRIGHT1', etc.
nint : int
Number of integrations (ramps).
ngroup : int
Number of groups in a integration.
nf : int
Number of frames per group.
nd1 : int
Number of drop frame after reset (before first group read).
nd2 : int
Number of drop frames within a group (ie., groupgap).
nd3 : int
Number of drop frames after final read frame in ramp.
nr1 : int
Number of reset frames within first ramp.
nr2 : int
Number of reset frames for subsequent ramps.
"""
# Check if kwargs is empty
if not kwargs:
try:
kwargs = self.det_info
except AttributeError:
kwargs = {}
else:
try:
self._det_info.update(kwargs)
except AttributeError:
self._det_info = kwargs
kwargs = self.det_info
# Update detector class
# For now, it's just easier to delete old instances and start from scratch
# rather than tracking changes and updating only the changes. That could
# get complicated, and I don't think there is a memory leak from deleting
# the Detector instances.
try:
del self.Detector
except AttributeError:
pass
self.Detector = DetectorOps(detector=self.detector, **kwargs)
# Update stored kwargs
kw1 = self.Detector.to_dict()
_ = kw1.pop('detector', None)
kw2 = self.multiaccum.to_dict()
self._det_info = merge_dicts(kw1,kw2)
if verbose:
print('New Ramp Settings')
keys = ['read_mode', 'nf', 'nd2', 'ngroup', 'nint']
for k in keys:
v = self.det_info[k]
if isinstance(v,float): print("  {:<10} : {:>8.0f}".format(k, v))
else: print("  {:<10} : {:>8}".format(k, v))
print('New Detector Settings')
keys = ['wind_mode', 'xpix', 'ypix', 'x0', 'y0']
for k in keys:
v = self.det_info[k]
if isinstance(v,float): print("  {:<10} : {:>8.0f}".format(k, v))
else: print("  {:<10} : {:>8}".format(k, v))
print('New Ramp Times')
ma = self.multiaccum_times
keys = ['t_group', 't_frame', 't_int', 't_int_tot1', 't_int_tot2', 't_exp', 't_acq']
for k in keys:
print(' {:<10} : {:>8.3f}'.format(k, ma[k]))
def update_psf_coeff(self, filter=None, pupil_mask=None, image_mask=None, detector=None,
fov_pix=None, oversample=None, include_si_wfe=None, include_distortions=None,
pupil=None, pupilopd=None, offset_r=None, offset_theta=None, bar_offset=None,
jitter=None, jitter_sigma=None, npsf=None, ndeg=None, nproc=None, quick=None,
save=None, force=False, use_legendre=None, **kwargs):
""" Update properties and create new set of PSF coefficients
Parameters
----------
filter : str
Name of NIRCam filter.
pupil_mask : str, None
NIRCam pupil elements such as grisms or lyot stops (default: None).
image_mask : str, None
Specify which coronagraphic occulter (default: None).
detector : str
Name of detector (e.g., "NRCA5")
fov_pix : int
Size of the PSF FoV in pixels (real SW or LW pixels).
The defaults depend on the type of observation.
Odd numbers place the PSF at the center of a pixel,
whereas an even number centers it on the "crosshairs."
oversample : int
Factor to oversample during WebbPSF calculations.
Default 2 for coronagraphy and 4 otherwise.
include_si_wfe : bool
Include SI WFE measurements? Default=True.
include_distortions : bool
If True, will include a distorted version of the PSF.
pupil : str
File name or HDUList specifying telescope entrance pupil.
Can also be an OTE_Linear_Model.
pupilopd : tuple or HDUList
Tuple (file, index) or filename or HDUList specifying OPD.
Can also be an OTE_Linear_Model.
wfe_drift : float
Wavefront error drift amplitude in nm.
offset_r : float
Radial offset from the center in arcsec.
offset_theta :float
Position angle for radial offset, in degrees CCW.
bar_offset : float
For wedge masks, option to set the PSF position across the bar.
jitter : str or None
Currently either 'gaussian' or None.
jitter_sigma : float
If ``jitter = 'gaussian'``, then this is the size of the blurring effect.
npsf : int
Number of wavelengths/PSFs to fit.
ndeg : int
Degree of polynomial fit.
nproc : int
Manual setting of number of processor cores to break up PSF calculation.
If set to None, this is determined based on the requested PSF size,
amount of available memory, and hardware processor cores. The automatic
calculation endeavors to leave a number of resources available to the
user so as to not crash the user's machine.
save : bool
Save the resulting PSF coefficients to a file? (default: True)
force : bool
Forces a recalculation of PSF even if saved PSF exists. (default: False)
quick : bool
Only perform a fit over the filter bandpass with a lower default polynomial degree fit.
(default: True)
use_legendre : bool
Fit with Legendre polynomials, an orthonormal basis set. (default: True)
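Example
-------
A sketch of switching filters and PSF FoV in place; the specific values
are illustrative:
>>> nrc = pynrc.NIRCam('F335M')
>>> nrc.update_psf_coeff(filter='F444W', fov_pix=129, oversample=2)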
"""
update_coeffs = False
update_bg_coeffs = False
# filter, pupil mask, and image mask
if (filter is not None) and (filter != self.filter):
update_coeffs = True
update_bg_coeffs = True
self.filter = filter
if (pupil_mask is not None) and (pupil_mask != self.pupil_mask):
update_coeffs = True
update_bg_coeffs = True
if (pupil_mask.upper()=="CLEAR") or (pupil_mask.upper()=="NONE"):
pupil_mask = None
self.pupil_mask = pupil_mask
if (image_mask is not None) and (image_mask != self.image_mask):
update_coeffs = True
update_bg_coeffs = True
if (image_mask.upper()=="CLEAR") or (image_mask.upper()=="NONE"):
image_mask = None
self.image_mask = image_mask
if (fov_pix is not None) and (fov_pix != self.fov_pix):
update_coeffs = True
self.fov_pix = fov_pix
if (oversample is not None) and (oversample != self.oversample):
update_coeffs = True
self.oversample = oversample
# SI WFE and distortions
if (include_si_wfe is not None) and (include_si_wfe != self.include_si_wfe):
update_coeffs = True
self.include_si_wfe = include_si_wfe
if (include_distortions is not None) and (include_distortions != self.include_distortions):
update_coeffs = True
self.include_distortions = include_distortions
# Pupil OPD information
if (pupil is not None) and (self.pupil != pupil):
update_coeffs = True
self.pupil = pupil
if (pupilopd is not None) and (self.pupilopd != pupilopd):
update_coeffs = True
self.pupilopd = pupilopd
# Source and mask offsetting
if (offset_r is not None) and (self.options.get('source_offset_r') != offset_r):
update_coeffs = True
self.options['source_offset_r'] = offset_r
if (offset_theta is not None) and (self.options.get('source_offset_theta') != offset_theta):
update_coeffs = True
self.options['source_offset_theta'] = offset_theta
if (bar_offset is not None) and (self.options.get('bar_offset') != bar_offset):
update_coeffs = True
self.options['bar_offset'] = bar_offset
# Jitter
if (jitter is not None) and (self.options.get('jitter') != jitter):
update_coeffs = True
self.options['jitter'] = jitter
if (jitter_sigma is not None) and (self.options.get('jitter_sigma') != jitter_sigma):
update_coeffs = True
self.options['jitter_sigma'] = jitter_sigma
# Miscellaneous
if (npsf is not None) and (self.npsf != npsf):
update_coeffs = True
self.npsf = npsf
if (ndeg is not None) and (self.ndeg != ndeg):
update_coeffs = True
self.ndeg = ndeg
if (quick is not None) and (self.quick != quick):
update_coeffs = True
self.quick = quick
if (use_legendre is not None) and (self.use_legendre != use_legendre):
update_coeffs = True
self.use_legendre = use_legendre
# Detector update
if detector is not None:
update_coeffs = True
self.detector = get_detname(detector)
self.update_detectors()
# Regenerate PSF coefficients
if update_coeffs:
del self.psf_coeff, self.psf_coeff_header
save = True if save is None else save
self.gen_psf_coeff(save=save, force=force, nproc=nproc, **kwargs)
# Update drift, field, and mask-dependent coefficients
if self._psf_coeff_mod['wfe_drift'] is not None:
self.gen_wfedrift_coeff()
if self._psf_coeff_mod['si_field'] is not None:
self.gen_wfefield_coeff()
if self._psf_coeff_mod['si_mask'] is not None:
self.gen_wfemask_coeff()
# Update bg class if filter or pupil mask is changed
if update_bg_coeffs:
self._update_bg_class()
@property
def psf_info(self):
"""PSF parameters"""
d_options = self.options
d = {
'fov_pix': self.fov_pix, 'oversample': self.oversample,
'npsf': self.npsf, 'ndeg': self.ndeg, 'include_si_wfe': self.include_si_wfe,
'include_distortions': self.include_distortions,
'jitter': d_options.get('jitter'), 'jitter_sigma': d_options.get('jitter_sigma'),
'offset_r': d_options.get('source_offset_r', 0), 'offset_theta': d_options.get('source_offset_theta', 0),
'bar_offset': d_options.get('bar_offset', None),
'pupil': self.pupil, 'pupilopd': self.pupilopd,
}
return d
@property
def multiaccum(self):
""":class:`multiaccum` object"""
return self.Detector.multiaccum
@property
def multiaccum_times(self):
"""Exposure timings in dictionary
t_frame : Time of a single frame.
t_group : Time of a single group (read frames + drop frames).
t_int : Photon collection time for a single ramp/integration.
t_int_tot1: Total time for all frames (reset+read+drop) in a first ramp.
t_int_tot2: Total time for all frames (reset+read+drop) in a subsequent ramp.
t_exp : Total photon collection time for all ramps.
t_acq : Total acquisition time to complete exposure with all overheads.
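For example, given an existing NIRCam instance `nrc`:
>>> times = nrc.multiaccum_times
>>> print(times['t_int'], times['t_acq'])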
"""
return self.Detector.times_to_dict()
@property
def det_info(self):
"""Dictionary housing detector info parameters and keywords."""
return self._det_info
@property
def well_level(self):
"""Detector well level in units of electrons"""
return self.Detector.well_level
@property
def siaf_ap_names(self):
"""Give all possible SIAF aperture names"""
return list(self.siaf.apernames)
def get_siaf_apname(self):
"""Get SIAF aperture based on instrument settings"""
# Return already defined ap name
# if (self.siaf_ap is not None) and (not override):
# return self.siaf_ap.AperName
# else:
detid = self.Detector.detid
wind_mode = self.Detector.wind_mode
is_lyot = self.is_lyot
is_coron = self.is_coron
is_grism = self.is_grism
pupil_mask = self.pupil_mask
if self.channel=='long' or self.channel=='LW':
channel = 'LW'
else:
channel = 'SW'
# Time series filters
ts_filters = ['F277W','F356W','F444W','F322W2']
# Coronagraphic bar filters
swb_filters = ['F182M','F187N','F210M','F212N','F200W']
lwb_filters = [
'F250M','F300M','F277W','F335M','F360M',
'F356W','F410M','F430M','F460M','F480M','F444W'
]
# Coronagraphy
if is_coron:
wstr = 'FULL_' if wind_mode=='FULL' else ''
key = 'NRC{}_{}{}'.format(detid,wstr,self.image_mask)
if ('WB' in self.image_mask) and (self.module=='A') and (self.filter in swb_filters+lwb_filters):
key = key + '_{}'.format(self.filter)
if wind_mode=='STRIPE':
key = None
# Just Lyot stop without masks, assuming TA aperture
elif is_lyot: #and self.ND_acq:
tastr = 'TA' if self.ND_acq else 'FSTA'
key = 'NRC{}_{}'.format(detid,tastr)
if ('CIRC' in pupil_mask) and ('SW' in channel):
key = key + 'MASK210R'
elif ('CIRC' in pupil_mask) and ('LW' in channel):
key = key + 'MASK430R' if ('F4' in self.filter) else key + 'MASK335R'
elif ('WEDGE' in pupil_mask) and ('SW' in channel):
key = key + 'MASKSWB'
elif ('WEDGE' in pupil_mask) and ('LW' in channel):
key = key + 'MASKLWB'
# Time series grisms
elif is_grism and ('GRISMR' in pupil_mask) and (self.filter in ts_filters):
if wind_mode=='FULL':
key = f'NRC{detid}_GRISM_{self.filter}'
elif wind_mode=='STRIPE':
key = 'NRC{}_GRISM{}_{}'.format(detid,self.det_info['ypix'],self.filter)
else:
key = None
# SW Time Series with LW grism
elif wind_mode=='STRIPE':
key = 'NRC{}_GRISMTS{:.0f}'.format(detid,self.det_info['ypix'])
# WFSS
elif is_grism and (wind_mode=='FULL'):
key = 'NRC{}_{}_WFSS'.format(detid, pupil_mask)
# Subarrays
elif wind_mode=='WINDOW':
key = 'NRC{}_SUB{}P'.format(detid,self.det_info['xpix'])
if key not in self.siaf_ap_names:
key = 'NRC{}_TAPSIMG{}'.format(detid,self.det_info['xpix'])
if key not in self.siaf_ap_names:
key = 'NRC{}_TAGRISMTS{}'.format(detid,self.det_info['xpix'])
if key not in self.siaf_ap_names:
key = 'NRC{}_TAGRISMTS_SCI_{}'.format(detid,self.filter)
if key not in self.siaf_ap_names:
key = 'NRC{}_SUB{}'.format(detid,self.det_info['xpix'])
# Full frame generic
elif wind_mode=='FULL':
key = 'NRC{}_FULL'.format(detid)
else:
key = None
# Check if key exists
if key in self.siaf_ap_names:
_log.info('Suggested SIAF aperture name: {}'.format(key))
return key
else:
_log.warning("Suggested SIAF aperture name '{}' is not defined".format(key))
return None
def get_subarray_name(self, apname=None):
"""Get JWST NIRCam subarray name"""
if apname is None:
apname = self.get_siaf_apname()
pupil_mask = self.pupil_mask
image_mask = self.image_mask
module = self.module
detid = self.Detector.detid
wind_mode = self.Detector.wind_mode
ypix = self.det_info['ypix']
is_lyot = self.is_lyot
is_coron = self.is_coron
is_grism = self.is_grism
is_ndacq = self.ND_acq
if 'FULL' in wind_mode:
subarray_name = 'FULLP' if apname[-1] == 'P' else 'FULL'
elif 'STRIPE' in wind_mode:
subarray_name = f'SUBGRISM{ypix}'
elif is_coron:
sub_str = f'SUB{ypix}'
mask_str = image_mask[4:]
if ('335R' in image_mask) and (module == 'A'):
subarray_name = sub_str + module
else:
subarray_name = sub_str + module + mask_str
# Just Lyot stop without masks, assuming TA aperture
elif is_lyot:
mask_str = image_mask[4:]
# Faint source TA
if not is_ndacq:
subarray_name = 'SUBFS' + module + mask_str
elif 'LWB' in image_mask: # ND TA
if 'LWBL' in apname:
subarray_name = 'SUBND' + module + 'LWBL'
else:
subarray_name = 'SUBND' + module + 'LWBS'
elif 'SWB' in image_mask: # ND TA
if 'SWBS' in apname:
subarray_name = 'SUBND' + module + 'SWBS'
else:
subarray_name = 'SUBND' + module + 'SWBL'
else:
subarray_name = 'SUBND' + module + mask_str
else:
subarray_name = f'SUB{ypix}P' if apname[-1] == 'P' else f'SUB{ypix}'
# TODO: Grism TS TA, Fine phasing (FP), and DHS
return subarray_name
def update_from_SIAF(self, apname, pupil_mask=None, **kwargs):
"""Update detector properties based on SIAF aperture"""
if apname is None:
_log.warn('update_from_SIAF: Input apname was None. Returning...')
return
if not (apname in self.siaf_ap_names):
# raise ValueError(f'Cannot find {apname} in siaf.apernames list.')
_log.warn(f'update_from_SIAF: Cannot find {apname} in siaf.apernames list. Returning...')
return
if ('NRCALL' in apname) or ('NRCAS' in apname) or ('NRCBS' in apname):
raise ValueError(f'{apname} is not valid. Single detector apertures only.')
# Convert SCA name to detector ID
scaname = apname[0:5]
module = scaname[3]
channel = 'LW' if scaname[-1]=='5' else 'SW'
detid = 480 + int(scaname[4]) if module=='A' else 485 + int(scaname[4])
siaf_ap = self.siaf[apname]
xpix = int(siaf_ap.XSciSize)
ypix = int(siaf_ap.YSciSize)
if (xpix >= 2048) and (ypix>=2048):
wind_mode = 'FULL'
elif (xpix >= 2048):
wind_mode = 'STRIPE'
else:
wind_mode = 'WINDOW'
# Get lower left corner from siaf info
# This is in full frame detector coordinates
x0, y0 = np.array(siaf_ap.dms_corner()) - 1
# Update pupil and mask info
image_mask = None
ND_acq = False
filter = None
# Coronagraphic mask observations
if 'MASK' in apname:
# Set default pupil
if pupil_mask is None:
pupil_mask = 'WEDGELYOT' if 'WB' in apname else 'CIRCLYOT'
# Set mask occulter for all full arrays (incl. TAs) and science subarrays
# Treats full array TAs like a full coronagraphic observation
if ('FULL' in apname) or ('_MASK' in apname):
if ('MASKSWB' in apname):
image_mask = 'MASKSWB'
elif ('MASKLWB' in apname):
image_mask = 'MASKLWB'
elif ('MASK210R' in apname):
image_mask = 'MASK210R'
elif ('MASK335R' in apname):
image_mask = 'MASK335R'
elif ('MASK430R' in apname):
image_mask = 'MASK430R'
if 'TA' in apname:
_log.info('Full TA apertures are treated similar to coronagraphic observations.')
_log.info("To calculate SNR, self.update_psf_coeff(image_mask='CLEAR') and set self.ND_acq.")
elif '_TAMASK' in apname:
# For small TA subarray, turn off mask and enable ND square
image_mask = None
ND_acq = True
elif '_FSTAMASK' in apname:
# Not really anything to do here
image_mask = None
else:
_log.warn(f'No mask setting for {apname}')
# Grism observations
elif 'GRISM' in apname:
if ('_GRISMC' in apname): # GRISMC WFSS
pupil_mask = 'GRISMC' if pupil_mask is None else pupil_mask
elif ('_GRISMR' in apname): # GRISMR WFSS
pupil_mask = 'GRISMR' if pupil_mask is None else pupil_mask
elif ('_GRISMTS' in apname): # SW apertures in parallel w/ LW GRISMTS
pupil_mask = 'WLP8' if pupil_mask is None else pupil_mask
elif ('_TAGRISMTS' in apname): # GRISM TA have no pupil
pupil_mask = None
elif ('_GRISM' in apname): # Everything else is GRISMR
pupil_mask = 'GRISMR' if pupil_mask is None else pupil_mask
else:
_log.warn(f'No grism setting for {apname}')
# Look for filter specified in aperture name
if ('_F1' in apname) or ('_F2' in apname) or ('_F3' in apname) or ('_F4' in apname):
# Find all instances of "_"
inds = [pos for pos, char in enumerate(apname) if char == '_']
# Filter is always appended to end, but can have different string sizes (F322W2)
filter = apname[inds[-1]+1:]
# Save to internal variables
self.pupil_mask = pupil_mask
self.image_mask = image_mask
self._ND_acq = ND_acq
# Filter stuff
# Defaults
fsw_def, flw_def = ('F210M', 'F430M')
if filter is not None:
self.filter = filter
try:
if self._filter is None:
self._filter = fsw_def if 'SW' in channel else flw_def
except AttributeError:
self._filter = fsw_def if 'SW' in channel else flw_def
# If filter doesn't make sense with channel
if channel=='SW' and self._filter not in self._filters_sw:
self._filter = fsw_def
if channel=='LW' and self._filter not in self._filters_lw:
self._filter = flw_def
self._validate_wheels()
# Update detector
det_kwargs = {'xpix': xpix, 'ypix': ypix, 'x0': x0, 'y0': y0, 'wind_mode':wind_mode}
kwargs = merge_dicts(kwargs, det_kwargs)
self.detector = get_detname(scaname)
self.update_detectors(**kwargs)
# Update aperture
self.siaf_ap = siaf_ap
def calc_psf_from_coeff(self, sp=None, return_oversample=True, return_hdul=True,
wfe_drift=None, coord_vals=None, coord_frame='tel', use_bg_psf=False, **kwargs):
kwargs['sp'] = sp
kwargs['return_oversample'] = return_oversample
kwargs['return_hdul'] = return_hdul
kwargs['wfe_drift'] = wfe_drift
kwargs['coord_vals'] = coord_vals
kwargs['coord_frame'] = coord_frame
if use_bg_psf:
return self._nrc_bg.calc_psf_from_coeff(**kwargs)
else:
return super().calc_psf_from_coeff(**kwargs)
def calc_psf(self, sp=None, return_oversample=True, return_hdul=True,
wfe_drift=None, coord_vals=None, coord_frame='tel', use_bg_psf=False,
**kwargs):
kwargs['sp'] = sp
kwargs['return_oversample'] = return_oversample
kwargs['return_hdul'] = return_hdul
kwargs['wfe_drift'] = wfe_drift
kwargs['coord_vals'] = coord_vals
kwargs['coord_frame'] = coord_frame
_log.info("Calculating PSF from WebbPSF parent function")
log_prev = conf.logging_level
setup_logging('WARN', verbose=False)
if use_bg_psf:
res = self._nrc_bg.calc_psf(**kwargs)
else:
res = super().calc_psf(**kwargs)
setup_logging(log_prev, verbose=False)
return res
def sat_limits(self, sp=None, bp_lim=None, units='vegamag', well_frac=0.8,
ngroup=None, trim_psf=33, verbose=False, **kwargs):
"""Saturation limits.
Generate the limiting magnitude (80% saturation) with the current instrument
parameters (filter and ramp settings) assuming some spectrum. If no spectrum
is defined, then a G2V star is assumed.
The user can also define a separate bandpass in which to determine the
limiting magnitude that will cause the current NIRCam bandpass to saturate.
Parameters
----------
sp : :mod:`pysynphot.spectrum`
Spectrum to determine saturation limit.
bp_lim : :mod:`pysynphot.obsbandpass`
Bandpass to report limiting magnitude.
units : str
Output units (defaults to vegamag).
well_frac : float
Fraction of full well to consider 'saturated'.
ngroup : int, None
Option to specify the number of groups to determine
integration time. If not set, then the default is to
use those specified in the Detectors class. Can set
ngroup=0 for the so-called Zero Frame in the event
there are multiple reads per group.
trim_psf : int, None
Option to crop the PSF coefficient around the brightest pixel.
For PSFs with large `fov_pix` values, this option helps speed
up the saturation limit calculation. After all, we're usually
only interested in the brightest pixel when calculating
saturation limits. Set to `None` to use the 'fov_pix' value.
Default = 33 (detector pixels).
verbose : bool
Print result details.
Example
-------
>>> nrc = pynrc.NIRCam('F430M') # Initiate NIRCam observation
>>> sp_A0V = pynrc.stellar_spectrum('A0V') # Define stellar spectral type
>>> bp_k = S.ObsBandpass('steward,k') # Pysynphot K-Band bandpass
>>> bp_k.name = 'K-Band'
>>> mag_lim = nrc.sat_limits(sp_A0V, bp_k, verbose=True)
Returns K-Band Limiting Magnitude for F430M assuming A0V source.
"""
from webbpsf_ext.psfs import gen_image_from_coeff
from copy import deepcopy
bp_lim = self.bandpass if bp_lim is None else bp_lim
quiet = False if verbose else True
# Total time spent integrating minus the reset frame
if ngroup is None:
t_sat = self.multiaccum_times['t_int']
else:
t_frame = self.multiaccum_times['t_frame']
if ngroup==0:
t_sat = t_frame
else:
ma = self.multiaccum
nf = ma.nf; nd1 = ma.nd1; nd2 = ma.nd2
t_sat = (nd1 + ngroup*nf + (ngroup-1)*nd2) * t_frame
# Full well level
well_level = self.well_level
# kwargs = merge_dicts(kwargs, self._psf_info)
# We don't necessarily need the entire image, so cut down to size
# 1. Create a temporary image at bp avg wavelength (monochromatic)
# 2. Find x,y position of max PSF
# 3. Cut out postage stamp region around that PSF coeff
psf_coeff = self.psf_coeff
psf_coeff_hdr = deepcopy(self.psf_coeff_header)
fov_pix, osamp = (psf_coeff_hdr['FOVPIX'], psf_coeff_hdr['OSAMP'])
if (trim_psf is not None) and (trim_psf < fov_pix):
# Quickly create a temporary PSF to find max value location
wtemp = np.array([bp_lim.wave[0], bp_lim.avgwave(), bp_lim.wave[-1]])
ttemp = np.array([bp_lim.sample(w) for w in wtemp])
bptemp = S.ArrayBandpass(wave=wtemp, throughput=ttemp)
# psf_temp, psf_temp_over = gen_image_coeff(bptemp, coeff=psf_coeff, coeff_hdr=psf_coeff_hdr, \
# fov_pix=fov_pix, oversample=osamp, return_oversample=True)
res = gen_image_from_coeff(self, psf_coeff, psf_coeff_hdr, nwaves=3, return_oversample=True)
if self.is_grism:
_, psf_temp_over = res
else:
psf_temp_over = res
# Amount to shift PSF
yind, xind = np.argwhere(psf_temp_over==psf_temp_over.max())[0]
ypix, xpix = psf_temp_over.shape
ysh = int(yind - ypix/2)
xsh = int(xind - xpix/2)
fov_pix_over = trim_psf * osamp
coeff = []
for im in psf_coeff:
im = fshift(im, -xsh, -ysh, interp='cubic')
im = pad_or_cut_to_size(im, (fov_pix_over,fov_pix_over))
coeff.append(im)
psf_coeff = np.array(coeff)
psf_coeff_hdr['FOVPIX'] = trim_psf
satlim = saturation_limits(self, psf_coeff=psf_coeff, psf_coeff_hdr=psf_coeff_hdr, sp=sp, units=units,
bp_lim=bp_lim, int_time=t_sat, full_well=well_level, well_frac=well_frac,
verbose=verbose, **kwargs)
return satlim
def saturation_levels(self, sp, full_size=True, ngroup=2, image=None, **kwargs):
""" Saturation levels
Create image showing level of saturation for each pixel.
Can either show the saturation after one frame (default)
or after the ramp has finished integrating (ramp_sat=True).
Parameters
----------
sp : :mod:`pysynphot.spectrum`
A pysynphot spectral object (normalized).
full_size : bool
Expand (or contract) to size of detector array?
If False, use fov_pix size.
ngroup : int
How many group times to determine saturation level?
If this number is higher than the total groups in ramp,
then a warning is produced. The default is ngroup=2.
A value of 0 corresponds to the so-called "zero-frame,"
which is the very first frame that is read-out and saved
separately. This is the equivalent to ngroup=1 for RAPID
and BRIGHT1 observations.
image : ndarray
Rather than generating an image on the fly, pass a pre-computed
slope image. Overrides `sp` and `full_size`
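Example
-------
A minimal sketch, assuming `nrc` is an existing NIRCam instance and the
spectrum has been normalized appropriately (spectral type is illustrative):
>>> sp = pynrc.stellar_spectrum('G2V')
>>> sat = nrc.saturation_levels(sp, ngroup=2)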
"""
assert ngroup >= 0
is_grism = self.is_grism
t_frame = self.multiaccum_times['t_frame']
t_int = self.multiaccum_times['t_int']
if ngroup==0:
t_sat = t_frame
else:
ma = self.multiaccum
nf = ma.nf; nd1 = ma.nd1; nd2 = ma.nd2
t_sat = (nd1 + ngroup*nf + (ngroup-1)*nd2) * t_frame
if t_sat>t_int:
_log.warning('ngroup*t_group is greater than t_int.')
# Slope image of input
if image is not None:
return image * t_sat / self.well_level
else:
image = self.calc_psf_from_coeff(sp=sp, return_oversample=False, return_hdul=False)
if is_grism:
wave, image = image
if full_size:
shape = (self.det_info['ypix'], self.det_info['xpix'])
image = pad_or_cut_to_size(image, shape)
# Add in zodi background to full image
image += self.bg_zodi(**kwargs)
# Well levels after "saturation time"
sat_level = image * t_sat / self.well_level
if is_grism:
return (wave, sat_level)
else:
return sat_level
def sensitivity(self, nsig=10, units=None, sp=None, verbose=False, **kwargs):
"""Sensitivity limits.
Convenience function for returning the point source (and surface brightness)
sensitivity for the given instrument setup. See `sensitivities` function
for more details.
Parameters
----------
nsig : int, float
Desired nsigma sensitivity (default 10).
units : str
Output units (defaults to uJy for grisms, nJy for imaging).
sp : :mod:`pysynphot.spectrum`
Input spectrum to use for determining sensitivity.
Only the spectral shape matters, unless ``forwardSNR=True``.
verbose : bool
Print result details.
Keyword Args
------------
forwardSNR : bool
Find the SNR of the input spectrum instead of sensitivity.
zfact : float
Factor to scale Zodiacal spectrum (default 2.5)
ideal_Poisson : bool
If set to True, use total signal for noise estimate,
otherwise MULTIACCUM equation is used.
rad_EE : float
Extraction aperture radius (in pixels) for imaging mode.
dw_bin : float
Delta wavelength for spectral sensitivities (grisms & DHS).
ap_spec : int, float
Instead of dw_bin, specify the spectral extraction aperture in pixels.
Takes priority over dw_bin. Value will get rounded up to nearest int.
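Example
-------
A short sketch using the default source spectrum (sp=None), assuming an
existing NIRCam instance `nrc`; nsig and units are illustrative:
>>> bglim = nrc.sensitivity(nsig=5, units='nJy', verbose=True)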
"""
tf = self.multiaccum_times['t_frame']
det = self.Detector
ktc = det.ktc
rn = det.read_noise
idark = det.dark_current
p_excess = det.p_excess
pupil_mask = '' if self.pupil_mask is None else self.pupil_mask
kw1 = self.multiaccum.to_dict()
kw2 = {'rn':rn, 'ktc':ktc, 'idark':idark, 'p_excess':p_excess}
kwargs = merge_dicts(kwargs,kw1,kw2)
if 'ideal_Poisson' not in kwargs.keys():
kwargs['ideal_Poisson'] = True
# Always use the bg coeff
psf_coeff = self._nrc_bg.psf_coeff
psf_coeff_hdr = self._nrc_bg.psf_coeff_header.copy()
fov_pix, osamp = (psf_coeff_hdr['FOVPIX'], psf_coeff_hdr['OSAMP'])
# We don't necessarily need the entire image, so cut down to size for speed
if (not ('WEAK LENS' in pupil_mask)) and (fov_pix > 33):
fov_pix = 33
fov_pix_over = fov_pix * osamp
psf_coeff = np.array([pad_or_cut_to_size(im, (fov_pix_over,fov_pix_over)) for im in psf_coeff])
kwargs['fov_pix'] = fov_pix
psf_coeff_hdr['FOVPIX'] = fov_pix
bglim = sensitivities(self, psf_coeff=psf_coeff, psf_coeff_hdr=psf_coeff_hdr,
sp=sp, units=units, nsig=nsig, tf=tf, verbose=verbose, **kwargs)
return bglim
def bg_zodi(self, zfact=None, **kwargs):
"""Zodiacal background flux.
There are options to call `jwst_backgrounds` to obtain better
predictions of the background. Specify keywords `ra`, `dec`,
and `thisday` to use `jwst_backgrounds`.
Returned values are in units of e-/sec/pixel
Parameters
----------
zfact : float
Factor to scale Zodiacal spectrum (default 2.5)
Keyword Args
------------
ra : float
Right ascension in decimal degrees
dec : float
Declination in decimal degrees
thisday : int
Calendar day to use for background calculation.
If not given, will use the average of visible calendar days.
Notes
-----
Representative values for zfact:
* 0.0 - No zodiacal emission
* 1.0 - Minimum zodiacal emission from JWST-CALC-003894
* 1.2 - Required NIRCam performance
* 2.5 - Average (default)
* 5.0 - High
* 10.0 - Maximum
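Example
-------
For instance, assuming an existing NIRCam instance `nrc` and an average
zodiacal level:
>>> fzodi = nrc.bg_zodi(zfact=2.5)  # e-/sec/pixel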
"""
# Dark image
if self.is_dark:
return 0
bp = self.bandpass
waveset = bp.wave
sp_zodi = zodi_spec(zfact, **kwargs)
obs_zodi = S.Observation(sp_zodi, bp, waveset)
fzodi_pix = obs_zodi.countrate() * (self.pixelscale/206265.0)**2
# Recommend a zfact value if ra, dec, and thisday specified
if 'ra' in kwargs.keys():
sp_zodi_temp = zodi_spec(zfact=1)
obs_zodi_temp = S.Observation(sp_zodi_temp, bp, waveset)
fzodi_pix_temp = obs_zodi_temp.countrate() * (self.pixelscale/206265.0)**2
zf_rec = fzodi_pix / fzodi_pix_temp
str1 = 'Using ra,dec,thisday keywords can be relatively slow. \n'
str2 = '\tFor your specified loc and date, we recommend using zfact={:.1f}'.format(zf_rec)
_log.warn(str1 + str2)
# Don't forget about Lyot mask attenuation (not in bandpass throughput)
if self.is_lyot:
fzodi_pix *= 0.19
return fzodi_pix
def bg_zodi_image(self, zfact=None, frame='sci', **kwargs):
"""Zodiacal light image
Returns an image of background Zodiacal light emission
in e-/sec in specified coordinate frame.
Parameters
----------
zfact : float
Factor to scale Zodiacal spectrum (default 2.5)
frame : str
Return in 'sci' or 'det' coordinates?
Keyword Args
------------
ra : float
Right ascension in decimal degrees
dec : float
Declination in decimal degrees
thisday : int
Calendar day to use for background calculation.
If not given, will use the average of visible calendar days.
Notes
-----
Representative values for zfact:
* 0.0 - No zodiacal emission
* 1.0 - Minimum zodiacal emission from JWST-CALC-003894
* 1.2 - Required NIRCam performance
* 2.5 - Average (default)
* 5.0 - High
* 10.0 - Maximum
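Example
-------
For instance, to compare science and detector orientations (assuming an
existing NIRCam instance `nrc`):
>>> im_sci = nrc.bg_zodi_image(zfact=2.5, frame='sci')
>>> im_det = nrc.bg_zodi_image(zfact=2.5, frame='det')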
"""
detid = self.Detector.detid
x0, y0 = (self.det_info['x0'], self.det_info['y0'])
xpix, ypix = (self.det_info['xpix'], self.det_info['ypix'])
# Dark image
if self.is_dark:
return np.zeros([ypix,xpix])
bp = self.bandpass
waveset = bp.wave
sp_zodi = zodi_spec(zfact, **kwargs)
obs_zodi = S.Observation(sp_zodi, bp, waveset)
fzodi_pix = obs_zodi.countrate() * (self.pixelscale/206265.0)**2
# Get equivalent
if 'ra' in kwargs.keys():
sp_zodi_temp = zodi_spec(zfact=1)
obs_zodi_temp = S.Observation(sp_zodi_temp, bp, waveset)
fzodi_pix_temp = obs_zodi_temp.countrate() * (self.pixelscale/206265.0)**2
zfact = fzodi_pix / fzodi_pix_temp
_ = kwargs.pop('ra')
_ = kwargs.pop('dec')
_ = kwargs.pop('thisday')
filter = self.filter
pupil_mask = self.pupil_mask
if self.is_grism:
# sci coords
im_bg = grism_background_image(filter, pupil=pupil_mask, module=self.module, sp_bg=sp_zodi, **kwargs)
# Convert to det coords and crop
im_bg = sci_to_det(im_bg, detid)
im_bg = im_bg[y0:y0+ypix, x0:x0+xpix]
# Back to sci coords
im_bg = det_to_sci(im_bg, detid)
elif self.is_coron or self.coron_substrate:
# Create full image, then crop based on detector configuration
im_bg = build_mask_detid(detid, oversample=1, pupil=pupil_mask, filter=self.filter)
if im_bg is None:
# In the event the specified detid has no coronagraphic mask
# This includes ['A1', 'A3', 'B2', 'B4']
im_bg = np.ones([ypix,xpix])
else:
# Convert to det coords and crop
im_bg = sci_to_det(im_bg, detid)
im_bg = im_bg[y0:y0+ypix, x0:x0+xpix]
# Back to sci coords and multiply by e-/sec/pix
im_bg = det_to_sci(im_bg, detid)
# Multiply by e-/sec/pix
im_bg *= self.bg_zodi(zfact, **kwargs)
else:
# No spatial structures for direct imaging and certain Lyot masks.
im_bg = np.ones([ypix,xpix]) * self.bg_zodi(zfact, **kwargs)
# Clear reference pixels
# im_bg = sci_to_det(im_bg, detid)
# mask_ref = self.Detector.mask_ref
# im_bg[mask_ref] = 0
# im_bg = det_to_sci(im_bg, detid)
if frame=='det':
return sci_to_det(im_bg, detid)
elif frame=='sci':
return im_bg
else:
raise ValueError(f"frame {frame} not recognized. Use either 'sci' or 'det'.")
def ramp_optimize(self, sp, sp_bright=None, is_extended=False, patterns=None,
snr_goal=None, snr_frac=0.02, tacq_max=None, tacq_frac=0.1,
well_frac_max=0.8, nint_min=1, nint_max=5000, ng_min=2, ng_max=None,
return_full_table=False, even_nints=False, verbose=False, **kwargs):
"""Optimize ramp settings.
Find the optimal ramp settings to observe a spectrum based on input constraints.
This function quickly runs through each detector readout pattern and
calculates the acquisition time and SNR for all possible settings of NINT
and NGROUP that fulfill the SNR requirement (and other constraints).
The final output table is then filtered, removing those exposure settings
that have the same exact acquisition times but worse SNR. Further "obvious"
comparisons are done that exclude settings where there is another setting
that has both better SNR and less acquisition time. The best results are
then sorted by an efficiency metric (SNR / sqrt(acq_time)). To skip filtering
of results, set return_full_table=True.
The result is an AstroPy Table.
Parameters
----------
sp : :mod:`pysynphot.spectrum`
A pysynphot spectral object to calculate SNR.
sp_bright : :mod:`pysynphot.spectrum`, None
Same as sp, but optionally used to calculate the saturation limit
(treated as brightest source in field). If a coronagraphic mask
observation, then this source is assumed to be occulted and
sp is fully unocculted.
is_extended : bool
Treat sp source as an extended object with flux in units per arcsec^2.
snr_goal : float
Minimum required SNR for source. For grism, this is the average
SNR over all wavelengths.
snr_frac : float
Give fractional buffer room rather than strict SNR cut-off.
tacq_max : float
Maximum amount of acquisition time in seconds to consider.
tacq_frac : float
Fractional amount of time to consider exceeding tacq_max.
patterns : numpy array
Subset of MULTIACCUM patterns to check, otherwise check all.
nint_min/max : int
Min/max number of desired integrations.
ng_min/max : int
Min/max number of desired groups in a ramp.
well_frac_max : float
Maximum level that the pixel well is allowed to be filled.
Fractions greater than 1 imply hard saturation, but the reported
SNR will not be aware of any saturation that may occur to sp.
even_nints : bool
Return only the even NINTS
return_full_table : bool
Don't filter or sort the final results (ignores even_nints).
verbose : bool
Prints out top 10 results.
Keyword Args
------------
zfact : float
Factor to scale Zodiacal spectrum (default 2.5)
ra : float
Right ascension in decimal degrees
dec : float
Declination in decimal degrees
thisday : int
Calendar day to use for background calculation. If not given, will use the
average of visible calendar days.
ideal_Poisson : bool
Use total signal for noise estimate?
Otherwise MULTIACCUM equation is used.
Default = True
rad_EE : int
Extraction aperture radius (in pixels) for imaging mode.
dw_bin : float
Delta wavelength to calculate spectral sensitivities for
grisms and DHS.
ap_spec : float, int
Instead of dw_bin, specify the spectral extraction aperture
in pixels. Takes priority over dw_bin. Value will get rounded
up to nearest int.
Note
----
The keyword arguments ra, dec, thisday are not recommended for use
given the amount of time it takes to query the web server.
Instead, use :meth:`bg_zodi` to match a zfact estimate.
Returns
-------
astropy table
A sorted and filtered table of ramp options.
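Example
-------
Hedged sketch of a typical call; `nrc` and the pysynphot spectrum `sp`
are assumed to exist, and the keyword values are illustrative only:
>>> tbl = nrc.ramp_optimize(sp, snr_goal=50, ng_max=10, nint_max=100, verbose=True)
>>> tbl['Pattern', 'NGRP', 'NINT', 't_acq', 'SNR', 'eff'][0:5]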
"""
def parse_snr(snr, grism_obs, ind_snr):
if grism_obs:
res = snr['snr']
return np.median(res)
else:
return snr[ind_snr]['snr']
pupil_mask = self.pupil_mask
grism_obs = self.is_grism
dhs_obs = (pupil_mask is not None) and ('DHS' in pupil_mask)
det_params_orig = self.det_info.copy()
if dhs_obs:
raise NotImplementedError('DHS has yet to be fully included.')
if grism_obs and is_extended:
raise NotImplementedError('Extended objects not implemented for grism observations.')
if (snr_goal is not None) and (tacq_max is not None):
raise ValueError('Keywords snr_goal and tacq_max are mutually exclusive.')
if (snr_goal is None) and (tacq_max is None):
raise ValueError('Must set either snr_goal or tacq_max.')
# Brightest source in field
if sp_bright is None:
sp_bright = sp
gen_psf = self.calc_psf_from_coeff
kw_gen_psf = {'return_oversample': False,'return_hdul': False}
# Generate PSFs for faint and bright objects and get max pixel flux
# Only necessary for point sources
if is_extended:
ind_snr = 1
obs = S.Observation(sp, self.bandpass, binset=self.bandpass.wave)
psf_faint = obs.countrate() * self.pixelscale**2
psf_bright = gen_psf(sp=sp_bright, use_bg_psf=False, **kw_gen_psf)
pix_count_rate = np.max([psf_bright.max(), psf_faint])
else:
ind_snr = 0
if grism_obs:
_, psf_bright = gen_psf(sp=sp_bright, use_bg_psf=False, **kw_gen_psf)
_, psf_faint = gen_psf(sp=sp, use_bg_psf=True, **kw_gen_psf)
else:
psf_bright = gen_psf(sp=sp_bright, use_bg_psf=False, **kw_gen_psf)
psf_faint = gen_psf(sp=sp, use_bg_psf=True, **kw_gen_psf)
pix_count_rate = np.max([psf_bright.max(), psf_faint.max()])
image = self.sensitivity(sp=sp, forwardSNR=True, return_image=True, **kwargs)
# Correctly format patterns
pattern_settings = self.multiaccum._pattern_settings
if patterns is None:
patterns = list(pattern_settings.keys())
if not isinstance(patterns, list):
patterns = [patterns]
m = np.zeros(len(patterns))
s = np.zeros(len(patterns))
for i,patt in enumerate(patterns):
v1,v2,v3 = pattern_settings.get(patt)
m[i] = v1
s[i] = v2
# Sort by nf (m+s) then by m
isort = np.lexsort((m,m+s))
patterns = list(np.array(patterns)[isort])
patterns.sort()
log_prev = conf.logging_level
setup_logging("WARN", verbose=False)
rows = []
if tacq_max is not None:
# Cycle through each readout pattern
for read_mode in patterns:
if verbose: print(read_mode)
# Maximum allowed groups for given readout pattern
_,_,ngroup_max = pattern_settings.get(read_mode)
if ng_max is not None:
ngroup_max = ng_max
nng = ngroup_max - ng_min + 1
if nng>30:
_log.warning(f'Cycling through {nng} NGROUPs. This may take a while!')
for ng in range(ng_min,ngroup_max+1):
self.update_detectors(read_mode=read_mode, ngroup=ng, nint=1)
mtimes = self.multiaccum_times
# Get saturation level of observation
# Total time spent integrating minus the reset frame
int_time = mtimes['t_int']
well_frac = pix_count_rate * int_time / self.well_level
# If above well_frac_max, then this setting is invalid
# Also, all subsequent values of ng will be too high
# so just break out of for loop.
if well_frac > well_frac_max:
break
# Approximate integrations needed to obtain required t_acq
nint1 = int(((1-tacq_frac)*tacq_max) / mtimes['t_acq'])
nint2 = int(((1+tacq_frac)*tacq_max) / mtimes['t_acq'] + 0.5)
nint1 = np.max([nint1,nint_min])
nint2 = np.min([nint2,nint_max])
nint_all = np.arange(nint1, nint2+1)
narr = len(nint_all)
# Sometimes there are a lot of nint values to check
# Let's pare down to <5 per ng
if narr>5:
i1 = int(narr/2-2)
i2 = i1 + 5
nint_all = nint_all[i1:i2]
#print(len(nint_all))
for nint in nint_all:
if nint > nint_max:
break
self.update_detectors(nint=nint)
mtimes = self.multiaccum_times
sen = self.sensitivity(sp=sp, forwardSNR=True, image=image, **kwargs)
snr = parse_snr(sen, grism_obs, ind_snr)
rows.append((read_mode, ng, nint, mtimes['t_int'], mtimes['t_exp'], \
mtimes['t_acq'], snr, well_frac))
elif snr_goal is not None:
for i,read_mode in enumerate(patterns):
if verbose: print(read_mode)
# Maximum allowed groups for given readout pattern
_,_,ngroup_max = pattern_settings.get(read_mode)
if ng_max is not None:
ngroup_max = ng_max #np.min([ng_max,ngroup_max])
nng = ngroup_max - ng_min + 1
if nng>20:
_log.warning(f'Cycling through {nng} NGROUPs. This may take a while!')
ng_saved = False
for ng in range(ng_min,ngroup_max+1):
self.update_detectors(read_mode=read_mode, ngroup=ng, nint=1)
mtimes = self.multiaccum_times
# Get saturation level of observation
int_time = mtimes['t_int']
well_frac = pix_count_rate * int_time / self.well_level
# If above well_frac_max, then this setting is invalid
if well_frac > well_frac_max:
continue
# Get SNR (assumes no saturation)
sen = self.sensitivity(sp=sp, forwardSNR=True, image=image, **kwargs)
snr = parse_snr(sen, grism_obs, ind_snr)
# Approximate integrations needed to get to required SNR
nint = int((snr_goal / snr)**2)
nint = np.max([nint_min,nint])
if nint>nint_max:
continue
# Find NINT with SNR > (1-snr_frac)*snr_goal
self.update_detectors(nint=nint)
mtimes = self.multiaccum_times
sen = self.sensitivity(sp=sp, forwardSNR=True, image=image, **kwargs)
snr = parse_snr(sen, grism_obs, ind_snr)
while (snr<((1-snr_frac)*snr_goal)) and (nint<=nint_max):
nint += 1
self.update_detectors(nint=nint)
mtimes = self.multiaccum_times
sen = self.sensitivity(sp=sp, forwardSNR=True, image=image, **kwargs)
snr = parse_snr(sen, grism_obs, ind_snr)
# Skip if NINT exceeds the maximum allowed
if (nint > nint_max):
continue
# We want to make sure that at least one NINT setting is saved
# if the resulting SNR is higher than our stated goal.
if (snr > ((1+snr_frac)*snr_goal)) and ng_saved:
continue
rows.append((read_mode, ng, nint, mtimes['t_int'], mtimes['t_exp'], \
mtimes['t_acq'], snr, well_frac))
ng_saved = True
# Increment NINT until SNR > (1+snr_frac)*snr_goal
# Add each NINT to table output
while (snr < ((1+snr_frac)*snr_goal)) and (nint<=nint_max):
nint += 1
if (nint > nint_max): break # double-check
self.update_detectors(nint=nint)
sen = self.sensitivity(sp=sp, forwardSNR=True, image=image, **kwargs)
snr = parse_snr(sen, grism_obs, ind_snr)
mtimes = self.multiaccum_times
rows.append((read_mode, ng, nint, mtimes['t_int'], mtimes['t_exp'], \
mtimes['t_acq'], snr, well_frac))
# Return detector to original parameters
self.update_detectors(**det_params_orig)
setup_logging(log_prev, verbose=False)
names = ('Pattern', 'NGRP', 'NINT', 't_int', 't_exp', 't_acq', 'SNR', 'Well')
if len(rows)==0:
_log.warning('No ramp settings allowed within constraints! Reduce constraints.')
return Table(names=names)
# Place rows into an AstroPy Table
t_all = Table(rows=rows, names=names)
t_all['Pattern'].format = '<10'
t_all['t_int'].format = '9.2f'
t_all['t_exp'].format = '9.2f'
t_all['t_acq'].format = '9.2f'
t_all['SNR'].format = '8.1f'
t_all['Well'].format = '8.3f'
t_all['eff'] = t_all['SNR'] / np.sqrt(t_all['t_acq'])
# Round to 3 decimal places
t_all['eff'] = (1000*t_all['eff']).astype(int) / 1000.
t_all['eff'].format = '8.3f'
# Filter table?
if return_full_table:
# Sort by efficiency, then acq time
ind_sort = np.lexsort((t_all['t_acq'],1/t_all['eff']))
t_all = t_all[ind_sort]
if verbose:
print("Top 10 results sorted by 'efficiency' [SNR/sqrt(t_acq)]:")
print(t_all[0:10])
else:
t_all = table_filter(t_all, **kwargs)
ind_sort = np.lexsort((t_all['t_acq'],1/t_all['eff']))
t_all = t_all[ind_sort]
# Select only even integrations
if even_nints:
ind = (t_all['NINT'] % 2 == 0)
t_all = t_all[ind]
if verbose: print(t_all)
return t_all
def gen_psfs_over_fov(self, sptype='G0V', wfe_drift=0, osamp=1, npsf_per_full_fov=15,
return_coords=None, use_coeff=True, **kwargs):
"""Create PSF grid over full field of view
Wrapper around `calc_psfs_grid` that returns normalized PSFs across
the field of view.
Create a grid of PSFs across instrument aperture FoV. By default,
imaging observations will be for full detector FoV with regularly
spaced grid. Coronagraphic observations will cover nominal
coronagraphic mask region (usually 10s of arcsec) and will have
logarithmically spaced values where appropriate.
Parameters
==========
sptype : str
Spectral type, such as 'A0V' or 'K2III'.
wfe_drift : float
Desired WFE drift value relative to default OPD.
osamp : int
Sampling of output PSF relative to detector sampling.
npsf_per_full_fov : int
Number of PSFs across one dimension of the instrument's field of
view. If a coronagraphic observation, then this is for the nominal
coronagraphic field of view.
return_coords : None or str
Option to also return coordinate values in desired frame
('det', 'sci', 'tel', 'idl'). Output is then xvals, yvals, hdul_psfs.
use_coeff : bool
If True, uses `calc_psf_from_coeff`, otherwise WebbPSF's built-in `calc_psf`.
Keyword Args
============
xsci_vals: None or ndarray
Option to pass custom grid values along the x-axis in 'sci' coords.
If coronagraph, this instead corresponds to coronagraphic mask axis,
which has a slight rotation in MIRI.
ysci_vals: None or ndarray
Option to pass custom grid values along the y-axis in 'sci' coords.
If coronagraph, this instead corresponds to coronagraphic mask axis,
which has a slight rotation in MIRI.
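Example
=======
Hedged sketch; `nrc` is assumed to be a NIRCam instance, and the spectral
type and grid sampling below are illustrative:
>>> hdul_psfs = nrc.gen_psfs_over_fov(sptype='G0V', npsf_per_full_fov=5)
>>> xsci, ysci, hdul_psfs = nrc.gen_psfs_over_fov(return_coords='sci')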
"""
# Create input spectrum that is star normalized by unit response
bp = self.bandpass
sp = stellar_spectrum(sptype, bp.unit_response(), 'flam', bp)
return self.calc_psfs_grid(sp=sp, wfe_drift=wfe_drift, osamp=osamp,
return_coords=return_coords, use_coeff=use_coeff,
npsf_per_full_fov=npsf_per_full_fov, **kwargs)
def _gen_obs_params(self, target_name, ra, dec, date_obs, time_obs, pa_v3=0,
siaf_ap_ref=None, xyoff_idl=(0,0), visit_type='SCIENCE', time_series=False,
time_exp_offset=0, segNum=None, segTot=None, int_range=None, filename=None, **kwargs):
""" Generate a simple obs_params dictionary
An obs_params dictionary is used to create a jwst data model (e.g., Level1bModel).
Additional **kwargs will add/update elements to the final output dictionary.
Parameters
==========
ra : float
RA in degrees associated with observation pointing
dec : float
Dec in degrees associated with observation pointing
date_obs : str
YYYY-MM-DD
time_obs : str
HH:MM:SS
Keyword Args
============
pa_v3 : float
Telescope V3 position angle.
siaf_ap_ref : pysiaf Aperture
SIAF aperture class used for telescope pointing (if different than self.siaf_ap)
xyoff_idl : tuple, list
(x,y) offset in arcsec ('idl' coords) to dither observation
visit_type : str
'T_ACQ', 'CONFIRM', or 'SCIENCE'
time_series : bool
Is this a time series observation?
time_exp_offset : float
Exposure start time (in seconds) relative to beginning of observation execution.
segNum : int
The segment number of the current product. Only for TSO.
segTot : int
The total number of segments. Only for TSO.
int_range : list
Integration indices to use
filename : str or None
Name of output filename. If set to None, then auto generates a dummy name.
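Example
=======
Hedged sketch; the target name, coordinates, and date/time below are
made up for illustration, and `nrc` is an assumed NIRCam instance:
>>> obs_params = nrc._gen_obs_params('MY-TARGET', 83.8, -5.4,
... '2022-03-01', '12:00:00', pa_v3=30.0)
>>> obs_params['filename']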
"""
from .simul.apt import create_obs_params
from .simul.dms import DMS_filename
filt = self.filter
pupil = 'CLEAR' if self.pupil_mask is None else self.pupil_mask
mask = 'None' if self.image_mask is None else self.image_mask
det = self.Detector
siaf_ap_obs = self.siaf_ap
if siaf_ap_ref is None:
siaf_ap_ref = self.siaf_ap
ra_dec = (ra, dec)
kwargs['target_name'] = target_name
kwargs['nexposures'] = 1
obs_params = create_obs_params(filt, pupil, mask, det, siaf_ap_ref, ra_dec, date_obs, time_obs,
pa_v3=pa_v3, siaf_ap_obs=siaf_ap_obs, xyoff_idl=xyoff_idl, time_exp_offset=time_exp_offset,
visit_type=visit_type, time_series=time_series, segNum=segNum, segTot=segTot, int_range=int_range,
filename=filename, **kwargs)
if filename is None:
obs_id_info = obs_params['obs_id_info']
detname = det.detid
filename = DMS_filename(obs_id_info, detname, segNum=segNum, prodType='uncal')
obs_params['filename'] = filename
return obs_params
def simulate_ramps(self, sp=None, im_slope=None, cframe='sci', nint=None,
do_dark=False, rand_seed=None, **kwargs):
""" Simulate Ramp Data
Create a series of ramp data based on the current NIRCam settings.
This method calls the :func:`gen_ramp` function, which in turn calls
the detector noise generator :func:`~pynrc.simul.simulate_detector_ramp`.
Parameters
----------
im_slope : numpy array, None
Pass the slope image directly. If not set, then a slope
image will be created from the input spectrum keyword. This
should include zodiacal light emission, but not dark current.
Make sure this array is in detector coordinates.
sp : :mod:`pysynphot.spectrum`, None
A pysynphot spectral object. If not specified, then it is
assumed that we're looking at blank sky.
cframe : str
Output coordinate frame, 'sci' or 'det'.
nint : None or int
Option to specify an arbitrary number of integrations.
do_dark : bool
Make a dark ramp (i.e., pupil_mask='FLAT'), no external flux.
Keyword Args
------------
zfact : float
Factor to scale Zodiacal spectrum (default 2.5)
ra : float
Right ascension in decimal degrees
dec : float
Declination in decimal degrees
thisday : int
Calendar day to use for background calculation. If not given, will use the
average of visible calendar days.
return_full_ramp : bool
By default, we average groups and drop frames as specified in the
`det` input. If this keyword is set to True, then return all raw
frames within the ramp. The last set of `nd2` frames will be omitted.
out_ADU : bool
If true, divide by gain and convert to 16-bit UINT.
include_dark : bool
Add dark current?
include_bias : bool
Add detector bias?
include_ktc : bool
Add kTC noise?
include_rn : bool
Add readout noise per frame?
include_cpink : bool
Add correlated 1/f noise to all amplifiers?
include_upink : bool
Add uncorrelated 1/f noise to each amplifier?
include_acn : bool
Add alternating column noise?
apply_ipc : bool
Include interpixel capacitance?
apply_ppc : bool
Apply post-pixel coupling to linear analog signal?
include_refoffsets : bool
Include reference offsets between amplifiers and odd/even columns?
include_refinst : bool
Include reference/active pixel instabilities?
include_colnoise : bool
Add in column noise per integration?
col_noise : ndarray or None
Option to explicitly specify column noise distribution in
order to shift by one for subsequent integrations
amp_crosstalk : bool
Crosstalk between amplifiers?
add_crs : bool
Add cosmic ray events? See Robberto et al 2010 (JWST-STScI-001928).
cr_model: str
Cosmic ray model to use: 'SUNMAX', 'SUNMIN', or 'FLARES'.
cr_scale: float
Scale factor for probabilities.
apply_nonlinearity : bool
Apply non-linearity?
random_nonlin : bool
Add randomness to the linearity coefficients?
apply_flats: bool
Apply sub-pixel QE variations (crosshatching)?
latents : None or ndarray
(TODO) Apply persistence from previous integration.
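Example
-------
Hedged sketch; `nrc` and the pysynphot spectrum `sp` are assumed to
exist, and nint=2 is illustrative:
>>> ramps, zeros = nrc.simulate_ramps(sp=sp, nint=2, out_ADU=True)
>>> ramps.shape[0]  # one ramp cube per integration
2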
"""
from .reduce.calib import nircam_cal
rng = np.random.default_rng(rand_seed)
det = self.Detector
nint = det.multiaccum.nint if nint is None else nint
pupil_mask = 'FLAT' if do_dark else self.pupil_mask
xpix = self.det_info['xpix']
ypix = self.det_info['ypix']
# Set logging to WARNING to suppress messages
log_prev = conf.logging_level
setup_logging('WARN', verbose=False)
det_cal_obj = nircam_cal(self.scaid, verbose=False)
# If requesting dark images
if do_dark:
im_slope = np.zeros([ypix,xpix])
# If slope image is not specified
elif im_slope is None:
# Detector sampled images
gen_psf = self.calc_psf_from_coeff
kw_gen_psf = {'return_oversample': False,'return_hdul': False}
# Imaging+Coronagraphy
if pupil_mask is None:
im_slope = gen_psf(sp=sp, **kw_gen_psf)
# No visible source
elif ('FLAT' in pupil_mask) or (sp is None):
im_slope = np.zeros([ypix,xpix])
# Grism spec
elif ('GRISM' in pupil_mask):
w, im_slope = gen_psf(sp=sp, **kw_gen_psf)
# DHS spectroscopy
elif ('DHS' in pupil_mask):
raise NotImplementedError('DHS has yet to be fully included')
# Imaging+Coronagraphy
else:
im_slope = gen_psf(sp=sp, **kw_gen_psf)
# Expand or cut to detector size
im_slope = pad_or_cut_to_size(im_slope, (ypix,xpix))
# Add in Zodi emission
# Returns 0 if self.pupil_mask='FLAT'
im_slope += self.bg_zodi_image(**kwargs)
# Minimum value of slope
im_min = im_slope[im_slope>=0].min()
# Expand or cut to detector size
im_slope = pad_or_cut_to_size(im_slope, (ypix,xpix))
# Make sure there are no negative numbers
im_slope[im_slope<=0] = im_min
# Create a list of arguments to pass
worker_arguments = []
for i in range(nint):
rseed_i = rng.integers(0,2**32-1)
kw = {'im_slope': im_slope, 'cframe': cframe,
'return_zero_frame': True, 'rand_seed': rseed_i}
kws = merge_dicts(kw, kwargs)
args = (det, det_cal_obj)
worker_arguments.append((args, kws))
res_zeros = []
res_ramps = []
for wa in tqdm(worker_arguments, desc='Ramps', leave=False):
out = gen_ramps(wa)
res_ramps.append(out[0])
res_zeros.append(out[1])
setup_logging(log_prev, verbose=False)
return np.asarray(res_ramps), np.asarray(res_zeros)
def simulate_level1b(self, target_name, ra, dec, date_obs, time_obs,
sp=None, im_slope=None, cframe='sci', nint=None, do_dark=False,
save_dir=None, return_model=False, return_hdul=False, **kwargs):
""" Simulate DMS Level 1b data model """
from .simul.dms import level1b_data_model, save_level1b_fits
from stdatamodels import fits_support
# Update total number of integrations
if nint is not None:
nint_orig = self.Detector.multiaccum.nint
self.update_detectors(nint=nint)
kwargs['out_ADU'] = True
sci_data, zero_data = self.simulate_ramps(sp=sp, im_slope=im_slope, cframe=cframe, nint=nint,
do_dark=do_dark, **kwargs)
obs_params = self._gen_obs_params(target_name, ra, dec, date_obs, time_obs, **kwargs)
obs_params['save_dir'] = save_dir
outModel = level1b_data_model(obs_params, sci_data=sci_data, zero_data=zero_data)
if save_dir:
save_level1b_fits(outModel, obs_params, save_dir=save_dir)
# Restore original number of integrations
if nint is not None:
self.update_detectors(nint=nint_orig)
if return_hdul:
out_hdul, out_asdf = fits_support.to_fits(outModel._instance, outModel._schema)
if return_model and return_hdul:
return outModel, out_hdul
elif return_model:
return outModel
elif return_hdul:
return out_hdul
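# Hedged usage sketch for simulate_level1b; the target info, date/time, and
# save directory are illustrative, and `nrc` / `sp` are assumed to exist:
# outModel = nrc.simulate_level1b('MY-TARGET', 83.8, -5.4, '2022-03-01', '12:00:00',
#                                 sp=sp, nint=2, save_dir='/tmp', return_model=True)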
def table_filter(t, topn=None, **kwargs):
"""Filter and sort table.
Filter a resulting ramp table to exclude those with worse SNR for the same
or larger tacq. This is performed on a pattern-specific basis and returns
the Top N rows for each readout pattern. The rows are ranked by an efficiency
metric, which is simply SNR / sqrt(tacq). If topn is set to None, then all
values that make the cut are returned (sorted by the efficiency metric).
Args
----
topn : int, None
Maximum number of rows to keep.
"""
if topn is None: topn = len(t)
temp = multiaccum()
pattern_settings = temp._pattern_settings
patterns = np.unique(t['Pattern'])
m = np.zeros(len(patterns))
s = np.zeros(len(patterns))
for i,patt in enumerate(patterns):
v1,v2,v3 = pattern_settings.get(patt)
m[i] = v1
s[i] = v2
# Sort by nf (m+s) then by m
isort = np.lexsort((m,m+s))
from sklearn.base import BaseEstimator
from sklearn.pipeline import Pipeline
import logging
from sklearn.preprocessing import Normalizer
from argmining.models.stts import STTS_TAGSET
from argmining.models.uts import UTS_TAGSET, get_UTS_tag
from collections import OrderedDict
import numpy as np
from sklearn.feature_selection import chi2
from sklearn.feature_selection import SelectKBest
def build(use_STTS=True):
pipeline = Pipeline([('transformer',
POSDistribution(use_STTS=use_STTS)),
('normalizer', Normalizer())
])
return ('pos_distribution', pipeline)
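# Hedged usage sketch: the (name, pipeline) tuple returned by build() is meant
# to be combined with other feature builders inside a sklearn FeatureUnion;
# `documents` below is an assumed corpus variable:
# from sklearn.pipeline import FeatureUnion
# features = FeatureUnion([build(use_STTS=True)])
# X = features.fit_transform(documents)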
def build_feature_selection(use_STTS=True, k=5):
pipeline = Pipeline([('transformer',
POSDistribution(use_STTS=use_STTS)),
('feature_selection',
SelectKBest(chi2, k=k)),
('normalizer', Normalizer())
])
return ('pos_distribution', pipeline)
def get_pos_histogram(pos_list, tag_set):
histogram = OrderedDict.fromkeys(tag_set, 0)
for entry in pos_list:
histogram[entry] += 1
values = []
for key, value in histogram.items():
values.append(value)
histogram = np.array(values, dtype=np.float64)
import os
import sys
import glob
import pickle as pkl
import warnings
from pathlib import Path
import numpy as np
import pandas as pd
from scipy.stats import ttest_rel
def load_stratified_prediction_results(results_dir, experiment_descriptor):
"""Load results of stratified prediction experiments.
Arguments
---------
results_dir (str): directory to look in for results, subdirectories should
be experiments for individual genes or cancer types
experiment_descriptor (str): string describing this experiment, can be
useful to segment analyses involving multiple
experiments or results sets
Returns
-------
results_df (pd.DataFrame): results of classification experiments
"""
results_df = pd.DataFrame()
results_dir = Path(results_dir)
for identifier in results_dir.iterdir():
identifier_dir = Path(results_dir, identifier)
if identifier_dir.is_file(): continue
for results_file in identifier_dir.iterdir():
if not results_file.is_file(): continue
results_filename = str(results_file.stem)
# skip compressed files here, use load_compressed* functions
# to load that data separately
if check_compressed_file(results_filename): continue
if ('classify' not in results_filename or
'metrics' not in results_filename): continue
if results_filename[0] == '.': continue
id_results_df = pd.read_csv(results_file, sep='\t')
id_results_df['experiment'] = experiment_descriptor
results_df = pd.concat((results_df, id_results_df))
return results_df
def load_compressed_prediction_results(results_dir,
experiment_descriptor,
old_filenames=False):
"""Load results of compressed prediction experiments.
Arguments
---------
results_dir (str): directory to look in for results, subdirectories should
be experiments for individual genes or cancer types
experiment_descriptor (str): string describing this experiment, can be
useful to segment analyses involving multiple
experiments or results sets
old_filenames (bool): use old filename format
Returns
-------
results_df (pd.DataFrame): results of classification experiments
"""
results_df = pd.DataFrame()
results_dir = Path(results_dir)
for identifier in results_dir.iterdir():
identifier_dir = Path(results_dir, identifier)
if identifier_dir.is_file(): continue
for results_file in identifier_dir.iterdir():
if not results_file.is_file(): continue
results_filename = str(results_file.stem)
if not check_compressed_file(results_filename): continue
if ('classify' not in results_filename or
'metrics' not in results_filename): continue
if results_filename[0] == '.': continue
if old_filenames:
try:
n_dims = int(results_filename.split('_')[-3].replace('n', ''))
except ValueError:
n_dims = int(results_filename.split('_')[-2].replace('n', ''))
else:
n_dims = int(results_filename.split('_')[-2].replace('n', ''))
id_results_df = pd.read_csv(results_file, sep='\t')
id_results_df['n_dims'] = n_dims
id_results_df['experiment'] = experiment_descriptor
results_df = pd.concat((results_df, id_results_df))
return results_df
def load_purity_results(results_dir, classify=True):
"""Load results of tumor purity experiments.
Arguments
---------
results_dir (str): directory containing results files
Returns
-------
results_df (pd.DataFrame): results of prediction experiments
"""
results_df = pd.DataFrame()
results_dir = Path(results_dir)
for results_file in results_dir.iterdir():
if not results_file.is_file(): continue
results_filename = str(results_file.stem)
if classify and ('classify' not in results_filename
or 'metrics' not in results_filename): continue
if not classify and ('regress' not in results_filename
or 'metrics' not in results_filename): continue
if results_filename[0] == '.': continue
id_results_df = pd.read_csv(results_file, sep='\t')
if check_compressed_file(results_filename):
id_results_df.training_data += '_compressed'
results_df = pd.concat((results_df, id_results_df))
return results_df
def load_msi_results(results_dir):
"""Load results of microsatellite instability prediction experiments.
Arguments
---------
results_dir (str): directory containing results files
Returns
-------
results_df (pd.DataFrame): results of prediction experiments
"""
results_df = pd.DataFrame()
results_dir = Path(results_dir)
for results_file in results_dir.iterdir():
if not results_file.is_file(): continue
results_filename = str(results_file.stem)
if ('classify' not in results_filename
or 'metrics' not in results_filename): continue
if results_filename[0] == '.': continue
id_results_df = pd.read_csv(results_file, sep='\t')
# TODO: n_dims?
results_df = pd.concat((results_df, id_results_df))
return results_df
def load_purity_by_cancer_type(results_dir, sample_info_df, classify=True):
"""Load results of tumor purity prediction, grouped by cancer type.
Assumes labels are binarized into above/below median.
Arguments
---------
results_dir (str): directory containing results files
sample_info_df (pd.DataFrame): contains cancer type info for samples
classify (bool): look for classification results if true
Returns
-------
results_df (pd.DataFrame): results of prediction experiments
"""
results_df = pd.DataFrame()
results_dir = Path(results_dir)
for results_file in results_dir.iterdir():
if not results_file.is_file(): continue
results_filename = str(results_file.stem)
if classify and ('classify' not in results_filename
or 'preds' not in results_filename): continue
if not classify and ('regress' not in results_filename
or 'preds' not in results_filename): continue
if results_filename[0] == '.': continue
if check_compressed_file(results_filename):
training_data = '_'.join(results_filename.split('_')[:-5])
training_data += '_compressed'
signal = results_filename.split('_')[-5]
seed = int(results_filename.split('_')[-3].replace('s', ''))
else:
training_data = '_'.join(results_filename.split('_')[:-4])
signal = results_filename.split('_')[-4]
seed = int(results_filename.split('_')[-2].replace('s', ''))
id_results_df = pd.read_csv(results_file, sep='\t', index_col=0)
cancer_type_results_df = calculate_metrics_for_cancer_type(id_results_df,
training_data,
signal,
seed,
sample_info_df,
classify=classify)
results_df = pd.concat((results_df, cancer_type_results_df))
return results_df
def load_survival_curves(results_dir,
cancer_type,
signal='signal'):
samples, functions = [], []
path_name = os.path.join(
results_dir,
'{}_survival_{}_fold*_functions.pkl'.format(cancer_type, signal)
)
for fname in glob.glob(path_name):
with open(fname, 'rb') as f:
fns_dict = pkl.load(f)
samples += list(fns_dict['samples'])
functions += list(fns_dict['functions'])
return samples, functions
def calculate_metrics_for_cancer_type(id_results_df,
training_data,
signal,
seed,
sample_info_df,
classify=True):
cancer_type_results = []
for fold in id_results_df.fold_no.unique():
fold_df = (id_results_df[id_results_df.fold_no == fold]
.merge(sample_info_df, left_index=True, right_index=True)
.drop(columns=['sample_type', 'id_for_stratification'])
)
for cancer_type in fold_df.cancer_type.unique():
samples_df = fold_df[fold_df.cancer_type == cancer_type]
if classify:
from mpmp.prediction.classification import get_threshold_metrics
try:
with warnings.catch_warnings():
# get rid of ROC/PR sample imbalance warnings, we'll catch
# that case below
warnings.filterwarnings('ignore',
message='No negative samples')
warnings.filterwarnings('ignore',
message='No positive samples')
warnings.filterwarnings('ignore',
message='invalid value encountered')
aupr = (
get_threshold_metrics(samples_df.true_class,
samples_df.positive_prob)
)['aupr']
auroc = (
get_threshold_metrics(samples_df.true_class,
samples_df.positive_prob)
)['auroc']
except ValueError: # only one class in y_true
aupr = np.nan
auroc = np.nan
metric_names = ['aupr', 'auroc']
cancer_type_results.append((training_data, signal, seed,
fold, cancer_type, aupr, auroc))
else:
from mpmp.prediction.regression import get_continuous_metrics
metrics = get_continuous_metrics(samples_df.true_label,
samples_df.predicted_output)
rmse = metrics['rmse']
r2 = metrics['r2']
metric_names = ['rmse', 'r2']
cancer_type_results.append((training_data, signal, seed,
fold, cancer_type, rmse, r2))
return pd.DataFrame(cancer_type_results,
columns=['training_data', 'signal', 'seed',
'fold_no', 'cancer_type'] + metric_names)
def check_compressed_file(results_filename):
"""Check if results file is from compressed experiments."""
def string_is_int(s):
# https://stackoverflow.com/a/1267145
try:
int(s)
return True
except ValueError:
return False
# if a file uses compressed data, one component of the filename
# should have the format 'n{integer}'
for rs in results_filename.split('_'):
if rs.startswith('n') and string_is_int(rs.split('n')[1]):
return True
return False
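# Illustrative behaviour of check_compressed_file (filenames are made up):
# check_compressed_file('TP53_expression_n100_s42_classify_metrics') -> True
# check_compressed_file('TP53_expression_signal_s42_classify_metrics') -> False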
def load_preds_to_matrix(preds_dir,
sample_info_df,
training_data='expression'):
"""Load model predictions into a heatmap/confusion matrix.
Arguments
---------
preds_dir (str): directory where preds files are located
sample_info_df (pd.DataFrame): dataframe containing sample information
training_data (str): type of training data to filter to, if None don't
filter
Returns
---------
preds_df (str): a cancer type x cancer type dataframe, index contains
target label and columns are true labels, cells contain
average positive class probability for a model trained on
the target label and evaluated on the true label (high
probability = model predicts column class when trained on
row class)
"""
preds_df = pd.DataFrame()
for identifier in Path(preds_dir).iterdir():
identifier_dir = Path(preds_dir, identifier)
if identifier_dir.is_file():
continue
for results_file in identifier_dir.iterdir():
if not results_file.is_file():
continue
results_filename = str(results_file.stem)
if 'preds' not in results_filename:
continue
if 'signal' not in results_filename:
continue
if (training_data is not None and
training_data not in results_filename):
continue
cancer_type_preds_df = (
pd.read_csv(results_file, sep='\t', index_col=0)
.merge(sample_info_df[['cancer_type']],
left_index=True, right_index=True)
.drop(columns=['fold_no', 'true_class'])
.groupby('cancer_type')
.mean()
.T
.rename(index={'positive_prob': results_filename.split('_')[0]})
)
preds_df = pd.concat((preds_df, cancer_type_preds_df))
return preds_df.sort_index()
def compare_results(condition_1_df,
condition_2_df=None,
identifier='gene',
metric='auroc',
correction=False,
correction_method='fdr_bh',
correction_alpha=0.05,
verbose=False):
"""Compare cross-validation results between two experimental conditions.
Main uses for this are comparing an experiment against its negative control
(shuffled labels), and for comparing two experimental "conditions" (e.g.
different models, different data types) against one another.
Arguments
---------
condition_1_df (pd.DataFrame): either a single dataframe to compare against
its negative control, or the first of 2
conditions to compare against each other
condition_2_df (pd.DataFrame): if provided, a second dataframe to compare
against condition_1_df
identifier (str): column to use as the sample identifier
metric (str): column to use as the evaluation metric
correction (bool): whether or not to use a multiple testing correction
correction_method (str): which method to use for multiple testing correction
(from options in statsmodels.stats.multitest)
correction_alpha (float): significance cutoff to use
verbose (bool): if True, print verbose output to stderr
Returns
-------
results_df (pd.DataFrame): identifiers and results of statistical test
"""
if condition_2_df is None:
results_df = compare_control(condition_1_df, identifier, metric, verbose)
else:
results_df = compare_experiment(condition_1_df, condition_2_df,
identifier, metric, verbose)
if correction:
from statsmodels.stats.multitest import multipletests
corr = multipletests(results_df['p_value'],
alpha=correction_alpha,
method=correction_method)
results_df = results_df.assign(corr_pval=corr[1], reject_null=corr[0])
return results_df
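# Hedged usage sketch: compare an experiment against its shuffled-label control
# with an FDR correction; the results directory and descriptor are illustrative:
# results_df = load_stratified_prediction_results('results/gene', 'gene')
# stats_df = compare_results(results_df, identifier='gene', metric='aupr',
#                            correction=True, correction_alpha=0.001)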
def compare_control(results_df,
identifier='gene',
metric='auroc',
verbose=False):
results = []
unique_identifiers = np.unique(results_df[identifier].values)
for id_str in unique_identifiers:
conditions = ((results_df[identifier] == id_str) &
(results_df.data_type == 'test') &
(results_df.signal == 'signal'))
signal_results = results_df[conditions][metric].values
signal_seeds = results_df[conditions]['seed'].values
signal_folds = results_df[conditions]['fold'].values
conditions = ((results_df[identifier] == id_str) &
(results_df.data_type == 'test') &
(results_df.signal == 'shuffled'))
shuffled_results = results_df[conditions][metric].values
shuffled_seeds = results_df[conditions]['seed'].values
shuffled_folds = results_df[conditions]['fold'].values
if signal_results.shape != shuffled_results.shape:
if verbose:
print('shapes unequal for {}, skipping'.format(id_str),
file=sys.stderr)
continue
if not (np.array_equal(np.unique(signal_seeds), np.unique(shuffled_seeds))
and np.array_equal(np.unique(signal_folds), np.unique(shuffled_folds))):
if verbose:
print('samples unequal for {}, skipping'.format(id_str),
file=sys.stderr)
continue
if (signal_results.size == 0) or (shuffled_results.size == 0):
if verbose:
print('size 0 results array for {}, skipping'.format(id_str),
file=sys.stderr)
continue
# make sure seeds and folds are in same order
# this is necessary for paired t-test
try:
assert np.array_equal(signal_seeds, shuffled_seeds)
assert np.array_equal(signal_folds, shuffled_folds)
except AssertionError:
print(id_str, file=sys.stderr)
print(signal_seeds, shuffled_seeds, file=sys.stderr)
print(signal_folds, shuffled_folds, file=sys.stderr)
if np.array_equal(signal_results, shuffled_results):
delta_mean = 0
p_value = 1.0
else:
delta_mean = np.mean(signal_results)
#**********************************************************************************************
# Traffic Emulator for Network Services
# Copyright 2020 VMware, Inc
# The BSD-2 license (the "License") set forth below applies to all parts of
# the Traffic Emulator for Network Services project. You may not use this file
# except in compliance with the License.
#
# BSD-2 License
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE
#**********************************************************************************************
"""
#SAMPLE VIP METRICS QUERY
WITH vip_stats AS (
SELECT vip AS vip_,
sum(connections) AS connections,
sum(sessions) AS sessions,
sum(connections)/NULLIF(EXTRACT(EPOCH FROM max(tcp_client_vip_metrics.ts_ctrl)-min(tcp_client_vip_metrics.ts_ctrl)+15*'1 second'::interval),0) AS cps
FROM tcp_client_vip_metrics WHERE (id > 0)
GROUP BY vip
), url_stats AS (
WITH temp AS (
SELECT tcp_client_url_metrics.vip, tcp_client_url_metrics.method, tcp_client_url_metrics.uri,
sum(mean_latency * resp_rcvd) / NULLIF(sum(resp_rcvd), 0) AS net_mean
FROM tcp_client_url_metrics WHERE (id > 0 )
GROUP BY tcp_client_url_metrics.vip, tcp_client_url_metrics.method, tcp_client_url_metrics.uri
)
SELECT tcp_client_url_metrics.vip, tcp_client_url_metrics.method, tcp_client_url_metrics.uri,
sum(http_gets_sent) AS http_gets_sent,
sum(http_gets_rcvd) AS http_gets_rcvd,
sum(http_posts_sent) AS http_posts_sent,
sum(http_posts_rcvd) AS http_posts_rcvd,
sum(reqs_sent) AS reqs_sent,
sum(resp_rcvd) AS resp_rcvd,
sum(responses_1xx) AS responses_1xx,
sum(responses_2xx) AS responses_2xx,
sum(responses_200) AS responses_200,
sum(responses_3xx) AS responses_3xx,
sum(responses_4xx) AS responses_4xx,
sum(responses_404) AS responses_404,
sum(responses_5xx) AS responses_5xx,
sum(failed_reqs) AS failed_reqs,
sum(len_fail) AS len_fail,
sum(persist_fail) AS persist_fail,
sum(tcp_failures) AS tcp_failures,
min(latency_min) AS latency_min,
max(latency_max) AS latency_max,
sum(bytes_download) AS bytes_download,
sum(responses_200)/NULLIF(EXTRACT(EPOCH FROM max(tcp_client_url_metrics.ts_ctrl)-min(tcp_client_url_metrics.ts_ctrl)+15*'1 second'::interval),0) AS rps,
sum(bytes_download)/NULLIF(EXTRACT(EPOCH FROM max(tcp_client_url_metrics.ts_ctrl)-min(tcp_client_url_metrics.ts_ctrl)+15*'1 second'::interval),0) AS tput,
sum(mean_latency * resp_rcvd) / NULLIF(sum(resp_rcvd), 0) AS mean_latency,
sqrt(sum(resp_rcvd * (power(mean_latency - temp.net_mean, 2) + var_latency)) / NULLIF(sum(resp_rcvd), 0)) AS sd_latency
FROM tcp_client_url_metrics
INNER JOIN temp ON tcp_client_url_metrics.vip = temp.vip AND tcp_client_url_metrics.method = temp.method AND tcp_client_url_metrics.uri = temp.uri
WHERE (tcp_client_url_metrics.id > 0)
GROUP BY tcp_client_url_metrics.vip, tcp_client_url_metrics.method, tcp_client_url_metrics.uri
) SELECT * from url_stats INNER JOIN vip_stats ON url_stats.vip = vip_stats.vip_;
#SAMPLE QUERY TO MEMORY METRICS
https://stackoverflow.com/a/26388845
SELECT temp.index, SUM(malloc[temp.index]), SUM(free[temp.index]) FROM memory_metrics
JOIN (select generate_subscripts(malloc, 1) AS index, id AS iter FROM memory_metrics) AS temp
ON temp.iter = memory_metrics.id
GROUP BY temp.index;
#SAMPLE QUERY TO ERROR METRICS
https://dba.stackexchange.com/questions/100965/combining-separate-ranges-into-largest-possible-contiguous-ranges
WITH a AS (
SELECT vip, res_tag, ses_tag, error_type, counter, ts_range,
COALESCE(lower(ts_range),'-infinity') AS startdate,
max(COALESCE(upper(ts_range), 'infinity')) OVER (ORDER BY ts_range) AS enddate
FROM error_metrics
INNER JOIN resource_configs ON resource_configs.res_hash = error_metrics.res_hash
INNER JOIN session_configs ON session_configs.ses_hash = error_metrics.ses_hash
WHERE vip='http://10.52.180.160'
),
b AS (
SELECT *, lag(enddate) OVER (ORDER BY ts_range) < (startdate - (15 * interval '1 second')) OR NULL AS step
FROM a
),
c AS (
SELECT *, count(step) OVER (ORDER BY ts_range) AS grp
FROM b
)
SELECT vip, res_tag, ses_tag, error_type, sum(counter), tsrange(min(startdate), max(enddate)) AS ts_range
FROM c
GROUP BY vip, res_tag, ses_tag, error_type, grp;
"""
######################### ZMQ #########################
import asyncio, uvloop
import zmq, aiozmq
####################### FOR POSTGRES #####################
from psycopg2 import connect
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
from psycopg2.extras import DictCursor
######################### GENERIC #########################
import traceback
import time, os
from concurrent import futures
import json
from collections import OrderedDict
from datetime import datetime
from copy import copy
import decimal
import numpy as np
from multiprocessing import Process
from threading import Thread
######################### TE IMPORTS #########################
from TE_UTILS import Logger, dict_merge, convert
######################### GLOBAL DEFs #########################
# Metrics Profile message
# NOTE: Metrics Profile message is shared with te_stat_collector.h and TE_WRAP.py
# Changes must be reflected at both the places
HTTP_PROFILE = 1
UDP_CLIENT_PROFILE = 2
UDP_SERVER_PROFILE = 3
class TE_POSTGRES:
def __init__(self, postgres_port, logpath, loglevel, stat_collect_interval):
#LOGGER
log_file = os.path.join(logpath, 'te-postgres.log')
self.__lgr = Logger('[ TE POSTGRES ]', log_file, loglevel).getLogger()
self.__lgr.info("Init Of TE_POSTGRES")
self.__stat_collect_interval = stat_collect_interval
try:
self.__config = {
'user' : 'te',
'password' : 'te',
'db' : 'te',
'host' : 'localhost',
'port' : int(postgres_port),
}
init_connection = connect(dbname="postgres", user="postgres", password="<PASSWORD>", \
port=self.__config['port'])
init_connection.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
try:
with init_connection.cursor() as cursor:
cursor.execute("create user te with password 'te'")
cursor.execute("create database te")
cursor.execute("grant all privileges on database te to te")
init_connection.commit()
self.__lgr.info("Initialized database for te")
except:
init_connection.rollback()
self.__lgr.error("Initial rollback %s" %traceback.format_exc())
finally:
init_connection.close()
#Setting basic configuration for Postgres
self.__configure_db()
self.__configure_queries()
except:
self.__lgr.error("ERROR in __init__: %s" %traceback.format_exc())
def alter_stat_collect_interval(self, stat_collect_interval):
self.__stat_collect_interval = stat_collect_interval
return
def get_configs(self):
return self.__config
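# Hedged usage sketch: the dict returned by get_configs() maps directly onto
# psycopg2.connect keyword arguments; `te_postgres` is an assumed TE_POSTGRES instance:
# cfg = te_postgres.get_configs()
# conn = connect(dbname=cfg['db'], user=cfg['user'], password=cfg['password'],
#                host=cfg['host'], port=cfg['port'])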
def __configure_queries(self):
try:
self.__metric_keys_as_csv = {}
self.__metric_keys = {}
#TCP CLIENT VIP METRICS
self.__metric_keys['tcp_client_vip_metrics'] = ['connections', 'sessions']
#'good_connections', 'failed_connections' ==> Has problem in TE_DP (ONly for TCP CLIENT)
query_metric_list = []
for number_key in self.__metric_keys['tcp_client_vip_metrics']:
query_metric_list.append("sum(%s) AS %s" %(number_key, number_key))
self.__metric_keys['tcp_client_vip_metrics'].append("cps")
query_metric_list.append("sum(connections)/NULLIF(EXTRACT(EPOCH FROM " \
"max(ts_ctrl)-min(ts_ctrl)+%d*'1 second'::interval),0) AS cps" %self.__stat_collect_interval)
self.__metric_keys_as_csv["tcp_client_vip_metrics"] = ", ".join(query_metric_list)
#UDP CLIENT VIP METRICS
self.__metric_keys['udp_client_vip_metrics'] = ['good_connections', 'failed_connections', 'sessions']
query_metric_list = []
for number_key in self.__metric_keys['udp_client_vip_metrics']:
query_metric_list.append("sum(%s) AS %s" %(number_key, number_key))
self.__metric_keys['udp_client_vip_metrics'].append("cps")
query_metric_list.append("sum(good_connections)/NULLIF(EXTRACT(EPOCH FROM " \
"max(ts_ctrl)-min(ts_ctrl)+%d*'1 second'::interval),0) AS cps" %self.__stat_collect_interval)
self.__metric_keys_as_csv["udp_client_vip_metrics"] = ", ".join(query_metric_list)
#UDP SERVER VIP METRICS
self.__metric_keys['udp_server_vip_metrics'] = ["dg_rcvd", "dg_recv_timedout",
"dg_size_rcvd", "dg_sent", "dg_send_fail", "dg_size_sent", "request_rcvd",
"request_recv_timedout", "response_sent", "response_send_fail"]
query_metric_list = []
# Common Queries
for number_key in self.__metric_keys['udp_server_vip_metrics']:
query_metric_list.append("sum(%s) AS %s" %(number_key, number_key))
# RPS
self.__metric_keys['udp_server_vip_metrics'].append("rps")
query_metric_list.append("(sum(request_rcvd)+sum(response_sent))/NULLIF(EXTRACT(EPOCH FROM " \
"max(ts_ctrl)-min(ts_ctrl)+%d*'1 second'::interval),0) AS rps" %self.__stat_collect_interval)
# DPS
self.__metric_keys['udp_server_vip_metrics'].append("dps")
query_metric_list.append("(sum(dg_sent)+sum(dg_rcvd))/NULLIF(EXTRACT(EPOCH FROM " \
"max(ts_ctrl)-min(ts_ctrl)+%d*'1 second'::interval),0) AS dps" %self.__stat_collect_interval)
# TPUT
self.__metric_keys['udp_server_vip_metrics'].append("tput")
query_metric_list.append("(sum(dg_size_sent)+sum(dg_size_rcvd))/NULLIF(EXTRACT(EPOCH FROM " \
"max(ts_ctrl)-min(ts_ctrl)+%d*'1 second'::interval),0) AS tput" %self.__stat_collect_interval)
# Add to __metric_keys_as_csv
self.__metric_keys_as_csv["udp_server_vip_metrics"] = ", ".join(query_metric_list)
#TCP CLIENT URL METRICS
# Wait for the code to mature and clean up the mess below
self.__metric_keys['tcp_client_url_metrics'] = \
["http_gets_sent", "http_gets_rcvd", "http_posts_sent", "http_posts_rcvd", "reqs_sent", \
"resp_rcvd", "responses_1xx", "responses_2xx", "responses_200", "responses_3xx", \
"responses_4xx", "responses_404", "responses_5xx", "failed_reqs", "len_fail", \
"persist_fail", "tcp_failures", "bytes_download"]
query_metric_list = []
# Common queries
for number_key in self.__metric_keys['tcp_client_url_metrics']:
query_metric_list.append("sum(%s) AS %s" %(number_key, number_key))
# Minimum Latency
self.__metric_keys['tcp_client_url_metrics'].append("latency_min")
query_metric_list.append("min(latency_min) AS latency_min")
# Maximum Latency
self.__metric_keys['tcp_client_url_metrics'].append("latency_max")
query_metric_list.append("max(latency_max) AS latency_max")
# RPS
self.__metric_keys['tcp_client_url_metrics'].append("rps")
query_metric_list.append("sum(responses_200)/NULLIF(EXTRACT(EPOCH FROM " \
"max(ts_ctrl)-min(ts_ctrl)+%d*'1 second'::interval),0) AS rps" %self.__stat_collect_interval)
# TPUT
self.__metric_keys['tcp_client_url_metrics'].append("tput")
query_metric_list.append("sum(bytes_download)/NULLIF(EXTRACT(EPOCH FROM " \
"max(ts_ctrl)-min(ts_ctrl)+%d*'1 second'::interval),0) AS tput" %self.__stat_collect_interval)
# Add to __metric_keys_as_csv
self.__metric_keys_as_csv["tcp_client_url_metrics"] = ", ".join(query_metric_list)
#Derived Keys (Not as a part of the table) -- separate handling
self.__metric_keys['tcp_client_url_metrics'].append("mean_latency")
self.__metric_keys['tcp_client_url_metrics'].append("sd_latency")
#UDP CLIENT URL METRICS
self.__metric_keys['udp_client_url_metrics'] = \
["reqs_sent", "reqs_failed", "dg_sent", "dg_size_sent", "dg_send_fail",
"resp_rcvd", "resp_timedout", "dg_recd", "dg_size_recd", "dg_recv_timedout",
"latency_min", "latency_max", "mean_latency", "var_latency"]
query_metric_list = []
# Common queries
for number_key in self.__metric_keys['udp_client_url_metrics']:
query_metric_list.append("sum(%s) AS %s" %(number_key, number_key))
# Minimum Latency
self.__metric_keys['udp_client_url_metrics'].append("latency_min")
query_metric_list.append("min(latency_min) AS latency_min")
# Maximum Latency
self.__metric_keys['udp_client_url_metrics'].append("latency_max")
query_metric_list.append("min(latency_max) AS latency_max")
# RPS
self.__metric_keys['udp_client_url_metrics'].append("rps")
query_metric_list.append("sum(resp_rcvd)/NULLIF(EXTRACT(EPOCH FROM " \
"max(ts_ctrl)-min(ts_ctrl)+%d*'1 second'::interval),0) AS rps" %self.__stat_collect_interval)
# TPUT
self.__metric_keys['udp_client_url_metrics'].append("tput")
query_metric_list.append("(sum(dg_size_sent)+sum(dg_size_recd))/NULLIF(EXTRACT(EPOCH FROM " \
"max(ts_ctrl)-min(ts_ctrl)+%d*'1 second'::interval),0) AS tput" %self.__stat_collect_interval)
#Datagrams Per Second
self.__metric_keys['udp_client_url_metrics'].append("dps")
query_metric_list.append("(sum(dg_sent)+sum(dg_recd))/NULLIF(EXTRACT(EPOCH FROM " \
"max(ts_ctrl)-min(ts_ctrl)+%d*'1 second'::interval),0) AS dps" %self.__stat_collect_interval)
# Add to __metric_keys_as_csv
self.__metric_keys_as_csv['udp_client_url_metrics'] = ", ".join(query_metric_list)
#Derived Keys (Not as a part of the table) -- separate handling
self.__metric_keys['udp_client_url_metrics'].append("mean_latency")
self.__metric_keys['udp_client_url_metrics'].append("sd_latency")
# TCP CLIENT SES METRICS
self.__metric_keys['tcp_client_ses_metrics'] = \
['sessions','total_connections','cycles_complete','reqs_sent',\
'resp_rcvd','http_gets_sent','http_gets_rcvd','http_posts_sent','http_posts_rcvd',\
'failed_reqs','len_fail','persist_fail','post_fnf','bytes_download','complete_time',
'open_connections']
query_metric_list = []
for number_key in self.__metric_keys['tcp_client_ses_metrics']:
query_metric_list.append("sum(%s) AS %s" %(number_key, number_key))
self.__metric_keys_as_csv["tcp_client_ses_metrics"] = ", ".join(query_metric_list)
# UDP CLIENT SES METRICS
self.__metric_keys['udp_client_ses_metrics'] = \
["sessions", "cycles_complete", "good_connections", "failed_connections", "reqs_sent",
"reqs_failed", "dg_sent", "dg_size_sent", "dg_send_fail", "resp_rcvd", "resp_timedout",
"dg_recd", "dg_size_recd", "dg_recv_timedout"]
query_metric_list = []
for number_key in self.__metric_keys['udp_client_ses_metrics']:
query_metric_list.append("sum(%s) AS %s" %(number_key, number_key))
self.__metric_keys_as_csv["udp_client_ses_metrics"] = ", ".join(query_metric_list)
#MEMORY
self.__metric_keys['memory_metrics'] = ['free', 'malloc']
self.__metric_keys_as_csv["memory_metrics"] = ", ".join(query_metric_list)
#Session and Resource Configs
self.__metric_keys['session_configs'] = ['ses_config']
self.__metric_keys['resource_configs'] = ['res_config']
except:
self.__lgr.error(traceback.format_exc())
def __configure_db(self):
self.__tables = {}
self.__tables['resource_configs'] = """CREATE TABLE resource_configs (
res_hash VARCHAR(64) NOT NULL PRIMARY KEY,
res_tag VARCHAR(64) NOT NULL,
res_config JSON NOT NULL )"""
self.__tables['session_configs'] = """CREATE TABLE session_configs (
ses_hash VARCHAR(64) NOT NULL PRIMARY KEY,
ses_tag VARCHAR(64) NOT NULL,
ses_config JSON NOT NULL )"""
#RUNNING CONFIG TABLE (Holds the entire history of run)
self.__tables['running_configs'] = """CREATE TABLE running_configs (
res_hash VARCHAR(64) NOT NULL,
ses_hash VARCHAR(64) NOT NULL,
traffic_mode VARCHAR(7) NOT NULL,
traffic_profile VARCHAR(4) NOT NULL,
host_ip VARCHAR(128) NOT NULL,
cpu INTEGER NOT NULL,
start_time TIMESTAMP NOT NULL,
end_time TIMESTAMP )"""
self.__tables['tcp_client_vip_metrics'] = """CREATE TABLE tcp_client_vip_metrics (
id BIGSERIAL,
ts_ctrl TIMESTAMP NOT NULL,
ts TIMESTAMP NOT NULL,
host_ip VARCHAR(128) NOT NULL,
vip VARCHAR(128) NOT NULL,
res_hash VARCHAR(64) NOT NULL,
ses_hash VARCHAR(64) NOT NULL,
connections NUMERIC(20,1) NOT NULL,
good_connections NUMERIC(20,1) NOT NULL,
failed_connections NUMERIC(20,1) NOT NULL,
sessions NUMERIC(20,1) NOT NULL,
PRIMARY KEY(ts, host_ip, vip, res_hash, ses_hash) )"""
self.__tables['udp_client_vip_metrics'] = """CREATE TABLE udp_client_vip_metrics (
id BIGSERIAL,
ts_ctrl TIMESTAMP NOT NULL,
ts TIMESTAMP NOT NULL,
host_ip VARCHAR(128) NOT NULL,
vip VARCHAR(128) NOT NULL,
res_hash VARCHAR(64) NOT NULL,
ses_hash VARCHAR(64) NOT NULL,
good_connections NUMERIC(20,1) NOT NULL,
failed_connections NUMERIC(20,1) NOT NULL,
sessions NUMERIC(20,1) NOT NULL,
PRIMARY KEY(ts, host_ip, vip, res_hash, ses_hash) )"""
self.__tables['udp_server_vip_metrics'] = """CREATE TABLE udp_server_vip_metrics (
id BIGSERIAL,
ts_ctrl TIMESTAMP NOT NULL,
ts TIMESTAMP NOT NULL,
host_ip VARCHAR(128) NOT NULL,
vip VARCHAR(128) NOT NULL,
dg_rcvd NUMERIC(20,1) NOT NULL,
dg_recv_timedout NUMERIC(20,1) NOT NULL,
dg_size_rcvd NUMERIC(20,1) NOT NULL,
dg_sent NUMERIC(20,1) NOT NULL,
dg_send_fail NUMERIC(20,1) NOT NULL,
dg_size_sent NUMERIC(20,1) NOT NULL,
request_rcvd NUMERIC(20,1) NOT NULL,
request_recv_timedout NUMERIC(20,1) NOT NULL,
response_sent NUMERIC(20,1) NOT NULL,
response_send_fail NUMERIC(20,1) NOT NULL,
PRIMARY KEY(ts, host_ip, vip) )"""
self.__tables['ses_bucket_metrics'] = """CREATE TABLE ses_bucket_metrics (
id BIGSERIAL,
ts_ctrl TIMESTAMP NOT NULL,
ts TIMESTAMP NOT NULL,
host_ip VARCHAR(128) NOT NULL,
vip VARCHAR(128) NOT NULL,
res_hash VARCHAR(64) NOT NULL,
ses_hash VARCHAR(64) NOT NULL,
metrics JSON NOT NULL,
PRIMARY KEY(ts, host_ip, vip, res_hash, ses_hash) )"""
self.__tables['tcp_client_url_metrics'] = """CREATE TABLE tcp_client_url_metrics (
id BIGSERIAL,
ts_ctrl TIMESTAMP NOT NULL,
ts TIMESTAMP NOT NULL,
host_ip VARCHAR(128) NOT NULL,
vip VARCHAR(128) NOT NULL,
method VARCHAR(10) NOT NULL,
uri VARCHAR(128) NOT NULL,
res_hash VARCHAR(64) NOT NULL,
ses_hash VARCHAR(64) NOT NULL,
http_gets_sent NUMERIC(20,1) NOT NULL,
http_gets_rcvd NUMERIC(20,1) NOT NULL,
http_posts_sent NUMERIC(20,1) NOT NULL,
http_posts_rcvd NUMERIC(20,1) NOT NULL,
reqs_sent NUMERIC(20,1) NOT NULL,
resp_rcvd NUMERIC(20,1) NOT NULL,
responses_1xx NUMERIC(20,1) NOT NULL,
responses_2xx NUMERIC(20,1) NOT NULL,
responses_200 NUMERIC(20,1) NOT NULL,
responses_3xx NUMERIC(20,1) NOT NULL,
responses_4xx NUMERIC(20,1) NOT NULL,
responses_404 NUMERIC(20,1) NOT NULL,
responses_5xx NUMERIC(20,1) NOT NULL,
failed_reqs NUMERIC(20,1) NOT NULL,
len_fail NUMERIC(20,1) NOT NULL,
persist_fail NUMERIC(20,1) NOT NULL,
tcp_failures NUMERIC(20,1) NOT NULL,
mean_latency NUMERIC(20,15) NOT NULL,
var_latency NUMERIC(20,15) NOT NULL,
latency_min NUMERIC(20,15) NOT NULL,
latency_max NUMERIC(20,15) NOT NULL,
bytes_download NUMERIC(30,10) NOT NULL,
PRIMARY KEY(ts, host_ip, vip, method, uri, res_hash, ses_hash) )"""
self.__tables['udp_client_url_metrics'] = """CREATE TABLE udp_client_url_metrics (
id BIGSERIAL,
ts_ctrl TIMESTAMP NOT NULL,
ts TIMESTAMP NOT NULL,
host_ip VARCHAR(128) NOT NULL,
vip VARCHAR(128) NOT NULL,
method VARCHAR(10) NOT NULL,
res_hash VARCHAR(64) NOT NULL,
ses_hash VARCHAR(64) NOT NULL,
reqs_sent NUMERIC(20,1) NOT NULL,
reqs_failed NUMERIC(20,1) NOT NULL,
dg_sent NUMERIC(20,1) NOT NULL,
dg_size_sent NUMERIC(20,1) NOT NULL,
dg_send_fail NUMERIC(20,1) NOT NULL,
resp_rcvd NUMERIC(20,1) NOT NULL,
resp_timedout NUMERIC(20,1) NOT NULL,
dg_recd NUMERIC(20,1) NOT NULL,
dg_size_recd NUMERIC(20,1) NOT NULL,
dg_recv_timedout NUMERIC(20,1) NOT NULL,
latency_min NUMERIC(20,15) NOT NULL,
latency_max NUMERIC(20,15) NOT NULL,
mean_latency NUMERIC(20,15) NOT NULL,
var_latency NUMERIC(20,15) NOT NULL,
PRIMARY KEY(ts, host_ip, vip, method, res_hash, ses_hash) )"""
self.__tables['url_bucket_metrics'] = """CREATE TABLE url_bucket_metrics (
id BIGSERIAL,
ts_ctrl TIMESTAMP NOT NULL,
ts TIMESTAMP NOT NULL,
host_ip VARCHAR(128) NOT NULL,
vip VARCHAR(128) NOT NULL,
method VARCHAR(10) NOT NULL,
uri VARCHAR(128) NOT NULL,
res_hash VARCHAR(64) NOT NULL,
ses_hash VARCHAR(64) NOT NULL,
metrics JSON NOT NULL,
PRIMARY KEY(ts, host_ip, vip, method, uri, res_hash, ses_hash) )"""
self.__tables['tcp_client_ses_metrics'] = """CREATE TABLE tcp_client_ses_metrics (
id BIGSERIAL,
ts_ctrl TIMESTAMP NOT NULL,
ts TIMESTAMP NOT NULL,
host_ip VARCHAR(128) NOT NULL,
res_hash VARCHAR(64) NOT NULL,
ses_hash VARCHAR(64) NOT NULL,
sessions NUMERIC(20,1) NOT NULL,
open_connections NUMERIC(20,1) NOT NULL,
total_connections NUMERIC(20,1) NOT NULL,
cycles_complete NUMERIC(20,1) NOT NULL,
reqs_sent NUMERIC(20,1) NOT NULL,
resp_rcvd NUMERIC(20,1) NOT NULL,
http_gets_sent NUMERIC(20,1) NOT NULL,
http_gets_rcvd NUMERIC(20,1) NOT NULL,
http_posts_sent NUMERIC(20,1) NOT NULL,
http_posts_rcvd NUMERIC(20,1) NOT NULL,
failed_reqs NUMERIC(20,1) NOT NULL,
len_fail NUMERIC(20,1) NOT NULL,
persist_fail NUMERIC(20,1) NOT NULL,
post_fnf NUMERIC(20,1) NOT NULL,
bytes_download NUMERIC(30,10) NOT NULL,
complete_time NUMERIC(30,15) NOT NULL,
PRIMARY KEY(ts, host_ip, res_hash, ses_hash) )"""
self.__tables['udp_client_ses_metrics'] = """CREATE TABLE udp_client_ses_metrics (
id BIGSERIAL,
ts_ctrl TIMESTAMP NOT NULL,
ts TIMESTAMP NOT NULL,
host_ip VARCHAR(128) NOT NULL,
res_hash VARCHAR(64) NOT NULL,
ses_hash VARCHAR(64) NOT NULL,
sessions NUMERIC(20,1) NOT NULL,
cycles_complete NUMERIC(20,1) NOT NULL,
good_connections NUMERIC(20,1) NOT NULL,
failed_connections NUMERIC(20,1) NOT NULL,
reqs_sent NUMERIC(20,1) NOT NULL,
reqs_failed NUMERIC(20,1) NOT NULL,
dg_sent NUMERIC(20,1) NOT NULL,
dg_size_sent NUMERIC(20,1) NOT NULL,
dg_send_fail NUMERIC(20,1) NOT NULL,
resp_rcvd NUMERIC(20,1) NOT NULL,
resp_timedout NUMERIC(20,1) NOT NULL,
dg_recd NUMERIC(20,1) NOT NULL,
dg_size_recd NUMERIC(20,1) NOT NULL,
dg_recv_timedout NUMERIC(20,1) NOT NULL,
PRIMARY KEY(ts, host_ip, res_hash, ses_hash) )"""
self.__tables['memory_metrics'] = """CREATE TABLE memory_metrics (
id BIGSERIAL,
ts_ctrl TIMESTAMP NOT NULL,
ts TIMESTAMP NOT NULL,
host_ip VARCHAR(128) NOT NULL,
res_hash VARCHAR(64) NOT NULL,
ses_hash VARCHAR(64) NOT NULL,
pid INTEGER NOT NULL,
malloc INTEGER[] NOT NULL,
free INTEGER[] NOT NULL,
PRIMARY KEY(ts, host_ip, res_hash, ses_hash, pid) )"""
self.__tables['error_metrics'] = """CREATE TABLE error_metrics (
id BIGSERIAL,
ts_ctrl TIMESTAMP NOT NULL,
ts TIMESTAMP NOT NULL,
host_ip VARCHAR(128) NOT NULL,
vip VARCHAR(128) NOT NULL,
method VARCHAR(10) NOT NULL,
uri VARCHAR(128) NOT NULL,
res_hash VARCHAR(64) NOT NULL,
ses_hash VARCHAR(64) NOT NULL,
error_type VARCHAR(128) NOT NULL,
ts_range TSRANGE NOT NULL,
counter NUMERIC(20,1) NOT NULL,
PRIMARY KEY(ts, host_ip, vip, method, uri, res_hash, error_type, ses_hash) )"""
create_table_connection = connect(dbname=self.__config['db'], user=self.__config['user'], \
password=self.__config['password'], port=self.__config['port'])
try:
with create_table_connection.cursor() as cursor:
for table_name, query_to_create in self.__tables.items():
cursor.execute(query_to_create)
create_table_connection.commit()
self.__lgr.info("Created table {}".format(table_name))
except:
create_table_connection.rollback()
self.__lgr.error("Rollback during creation of {} ERROR={}".format(table_name, \
traceback.format_exc()))
finally:
create_table_connection.close()
try:
self.__intialize_row_counters()
#Specifies the keys that are grouped by, by default
self.__default_select_keys = {}
self.__default_select_keys['tcp_client_vip_metrics'] = ['vip']
self.__default_select_keys['tcp_client_url_metrics'] = ['vip']
self.__default_select_keys['udp_client_vip_metrics'] = ['vip']
self.__default_select_keys['udp_server_vip_metrics'] = ['vip']
self.__default_select_keys['udp_client_url_metrics'] = ['vip']
self.__default_select_keys['error_metrics'] = ['vip', 'error_type']
self.__default_select_keys['memory_metrics'] = ['index']
self.__default_select_keys['tcp_client_ses_metrics'] = []
self.__default_select_keys['udp_client_ses_metrics'] = []
#Specifies the order in which the group by operation has to be performed
self.__ORDER_OF_GROUP_BY = ['res_hash', 'res_tag', 'ses_hash', 'ses_tag', 'host_ip', 'vip',
'method', 'uri', #Only for tcp_client_url_metrics
'error_type', #Only for error_metrics
'pid', 'index' #Only for memory_metrics
]
self.__lgr.debug("ORDER: %s" %str(self.__ORDER_OF_GROUP_BY))
except:
self.__lgr.error(traceback.format_exc())
def __intialize_row_counters(self):
self.__last_read_row = {}
for key in self.__tables:
self.__last_read_row[key] = 0
def __insert_with_ts(self, conn, table_name, *args):
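# Prepends DEFAULT (for the BIGSERIAL id column) and the controller-side timestamp ts_ctrl
# to the caller-supplied values, so callers only pass the remaining columns in table order.
# Illustrative shape of the generated statement (values here are made up for this comment):
#   INSERT INTO udp_client_vip_metrics VALUES (DEFAULT, '2021-01-01 12:00:00', '2021-01-01 11:59:58', '10.0.0.1', ...)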
try:
values = ", ".join(map(str,args))
insert_query = "INSERT INTO {} VALUES (DEFAULT, {}, {})".format(table_name,
"'{}'".format(datetime.now().strftime("%Y-%m-%d %H:%M:%S")), values)
self.__lgr.debug("__insert_with_ts={}".format(insert_query))
with conn.cursor() as cursor:
cursor.execute(insert_query)
conn.commit()
return True
except:
self.__lgr.error(traceback.format_exc())
conn.rollback()
return False
def __insert(self, conn, table_name, *args):
try:
values = ", ".join(map(str,args))
insert_query = "INSERT INTO {} VALUES ({})".format(table_name, values)
with conn.cursor() as cursor:
cursor.execute(insert_query)
conn.commit()
return True
except:
self.__lgr.error(traceback.format_exc())
conn.rollback()
return False
def __execute_query(self, conn, query, fetch=True):
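# Runs a query with a DictCursor. Returns the fetched rows when fetch=True, True when no
# result set is requested, and None if execution fails (the transaction is rolled back).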
try:
with conn.cursor(cursor_factory=DictCursor) as cursor:
cursor.execute(query)
if not fetch:
return True
else:
result = cursor.fetchall()
conn.commit()
return result
except:
conn.rollback()
self.__lgr.error("Error during executing {}. ERROR={}".format(query, traceback.format_exc()))
return None
def __insert_server_vip_metrics(self, db_connection, ts, host_ip, metrics_dict):
table_name = 'udp_server_vip_metrics'
for vip, metric_json in metrics_dict.items():
self.__insert_with_ts(db_connection, table_name,
"'{}'".format(ts), "'{}'".format(host_ip), "'{}'".format(vip), \
metric_json['dg_rcvd'], metric_json['dg_recv_timedout'], metric_json['dg_size_rcvd'],
metric_json['dg_sent'], metric_json['dg_send_fail'], metric_json['dg_size_sent'],
metric_json['request_rcvd'], metric_json['request_recv_timedout'],
metric_json['response_sent'], metric_json['response_send_fail'])
def __insert_vip_metrics(self, db_connection, ts, host_ip, metrics_dict, is_bucketed=False):
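# metrics_dict is nested as res_hash -> ses_hash -> vip -> metric_json. Bucketed metrics are
# stored as a JSON blob in ses_bucket_metrics; otherwise the destination table is chosen from
# the metric's profile_type (HTTP vs. UDP client).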
try:
if(is_bucketed):
table_name = 'ses_bucket_metrics'
else:
table_name = {
HTTP_PROFILE : 'tcp_client_vip_metrics',
UDP_CLIENT_PROFILE : 'udp_client_vip_metrics'
}
for res_hash, res_hash_values in metrics_dict.items():
for ses_hash, ses_hash_values in res_hash_values.items():
for vip, metric_json in ses_hash_values.items():
profile_type = metric_json.get("profile_type", -1)
if(profile_type == UDP_CLIENT_PROFILE):
self.__insert_with_ts(db_connection, table_name[profile_type],
"'{}'".format(ts), "'{}'".format(host_ip), "'{}'".format(vip), \
res_hash, ses_hash, metric_json['good_connections'], \
metric_json['failed_connections'], metric_json['sessions'])
elif(profile_type == HTTP_PROFILE):
if is_bucketed:
metric_json = {"buckets" : metric_json}
self.__insert_with_ts(db_connection, table_name[profile_type],
"'{}'".format(ts), "'{}'".format(host_ip), "'{}'".format(vip), \
res_hash, ses_hash, "'{}'".format(metric_json))
else:
self.__insert_with_ts(db_connection, table_name[profile_type],
"'{}'".format(ts), "'{}'".format(host_ip), "'{}'".format(vip), \
res_hash, ses_hash, metric_json['connections'], \
metric_json['good_connections'], metric_json['failed_connections'], \
metric_json['sessions'])
except:
self.__lgr.error("%s: %s" %(table_name, traceback.format_exc()))
def __insert_memory_metrics(self, db_connection, ts, host_ip, metrics_dict):
try:
table_name = 'memory_metrics'
for res_hash, res_hash_values in metrics_dict.items():
for ses_hash, ses_hash_values in res_hash_values.items():
for pid, metric_json in ses_hash_values.items():
self.__insert_with_ts(db_connection, table_name,
"'{}'".format(ts), "'{}'".format(host_ip), res_hash, ses_hash, pid,
"array{}".format(metric_json['malloc_metric']),
"array{}".format(metric_json['free_metric']))
except:
self.__lgr.error("%s: %s" %(table_name, traceback.format_exc()))
def __insert_error_metrics(self, db_connection, ts, host_ip, metrics_dict):
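# metrics_dict is nested as res_hash -> ses_hash -> vip -> method -> uri -> error_type ->
# {start_time, end_time, counter}; the start/end pair is inserted as a TSRANGE literal.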
try:
table_name = 'error_metrics'
for res_hash, res_hash_values in metrics_dict.items():
for ses_hash, ses_hash_values in res_hash_values.items():
for vip, vip_values in ses_hash_values.items():
for method, method_values in vip_values.items():
for uri, error_values in method_values.items():
for error_type, metric_json in error_values.items():
self.__insert_with_ts(db_connection, table_name,
"'{}'".format(ts), "'{}'".format(host_ip), \
"'{}'".format(vip), "'{}'".format(method), \
"'{}'".format(uri), res_hash, ses_hash, \
"'{}'".format(error_type.replace("'",'')), "'[{}, {}]'".format(
metric_json['start_time'].rstrip(),
metric_json['end_time'].rstrip()),
metric_json['counter'])
except:
self.__lgr.error("%s: %s" %(table_name, traceback.format_exc()))
def __insert_url_metrics(self, db_connection, ts, host_ip, metrics_dict, is_bucketed=False):
try:
if(is_bucketed):
table_name = 'url_bucket_metrics'
else:
table_name = {
HTTP_PROFILE : 'tcp_client_url_metrics',
UDP_CLIENT_PROFILE : 'udp_client_url_metrics'
}
for res_hash, res_hash_values in metrics_dict.items():
for ses_hash, ses_hash_values in res_hash_values.items():
for vip, vip_values in ses_hash_values.items():
for method, method_values in vip_values.items():
for uri, metric_json in method_values.items():
profile_type = metric_json.get("profile_type", -1)
if(profile_type == UDP_CLIENT_PROFILE):
self.__insert_with_ts(db_connection, table_name[profile_type],
"'{}'".format(ts), "'{}'".format(host_ip),
"'{}'".format(vip), "'{}'".format(method),
res_hash, ses_hash,
metric_json['reqs_sent'], metric_json['reqs_failed'],
metric_json['dg_sent'], metric_json['dg_size_sent'],
metric_json['dg_send_fail'], metric_json['resp_recd'],
metric_json['resp_timedout'], metric_json['dg_recd'],
metric_json['dg_size_recd'], metric_json['dg_recv_timedout'],
"'{}'".format(metric_json.get('min_latency','NaN')),
metric_json.get('max_latency', 0),
metric_json.get('mean_latency', 0),
metric_json.get('var_latency', 0))
elif(profile_type == HTTP_PROFILE):
if is_bucketed:
metric_json = {"buckets" : metric_json}
self.__insert_with_ts(db_connection, table_name[profile_type],
"'{}'".format(ts), "'{}'".format(host_ip), \
"'{}'".format(vip), "'{}'".format(method), \
"'{}'".format(uri), res_hash, ses_hash, \
"'{}'".format(metric_json))
else:
# mean and var latency are calculated on the fly by
# stat_collector and can potentially be unavailable
self.__insert_with_ts(db_connection, table_name[profile_type],
"'{}'".format(ts), "'{}'".format(host_ip),
"'{}'".format(vip), "'{}'".format(method),
"'{}'".format(uri), res_hash, ses_hash,
metric_json['http_gets_sent'],
metric_json['http_gets_rcvd'],
metric_json['http_posts_sent'],
metric_json['http_posts_rcvd'],
metric_json['reqs_sent'], metric_json['resp_rcvd'],
metric_json['responses_1xx'],
metric_json['responses_2xx'],
metric_json['responses_200'],
metric_json['responses_3xx'],
metric_json['responses_4xx'],
metric_json['responses_404'],
metric_json['responses_5xx'],
metric_json['failed_reqs'],
metric_json['len_fail'],
metric_json['persist_fail'],
metric_json['tcp_failures'],
metric_json.get('mean_latency', 0),
metric_json.get('var_latency', 0),
metric_json['min_time'], metric_json['max_time'],
metric_json['bytes_download'])
except:
self.__lgr.error("%s: %s" %(table_name, traceback.format_exc()))
def __insert_ses_metrics(self, db_connection, ts, host_ip, metrics_dict):
try:
for res_hash, res_hash_values in metrics_dict.items():
for ses_hash, metric_json in res_hash_values.items():
profile_type = metric_json.get("profile_type", -1)
if(profile_type == UDP_CLIENT_PROFILE):
table_name = 'udp_client_ses_metrics'
self.__insert_with_ts(db_connection, table_name,
"'%s'"%ts, "'%s'"%host_ip, res_hash, ses_hash,
metric_json['sessions'], metric_json['cycles_complete'],
metric_json['good_connections'], metric_json['failed_connections'],
metric_json['reqs_sent'], metric_json['reqs_failed'],
metric_json['dg_sent'], metric_json['dg_size_sent'],
metric_json['dg_send_fail'], metric_json['resp_recd'],
metric_json['resp_timedout'], metric_json['dg_recd'],
metric_json['dg_size_recd'], metric_json['dg_recv_timedout'])
elif(profile_type == HTTP_PROFILE):
table_name = 'tcp_client_ses_metrics'
self.__insert_with_ts(db_connection, table_name,
"'%s'"%ts, "'%s'"%host_ip, res_hash, ses_hash,
metric_json['sessions'], metric_json['open_connections'],
metric_json['total_connections'], metric_json['cycles_complete'],
metric_json['reqs_sent'], metric_json['resp_rcvd'],
metric_json['http_gets_sent'], metric_json['http_gets_rcvd'],
metric_json['http_posts_sent'], metric_json['http_posts_rcvd'],
metric_json['failed_reqs'], metric_json['len_fail'],
metric_json['persist_fail'], metric_json['post_fnf'],
metric_json['bytes_download'], metric_json['complete_time'])
except:
self.__lgr.error("%s: %s" %(table_name, traceback.format_exc()))
def clear_tables(self):
clear_table_connection = connect(dbname=self.__config['db'], user=self.__config['user'], \
password=self.__config['password'], port=self.__config['port'])
delete_tables = ", ".join(self.__tables.keys())
delete_query = "TRUNCATE TABLE {} RESTART IDENTITY CASCADE".format(delete_tables)
self.__lgr.debug("Trying to TRUNCATE table with command {}".format(delete_query))
try:
with clear_table_connection.cursor() as cursor:
cursor.execute(delete_query)
clear_table_connection.commit()
self.__intialize_row_counters()
return True
except:
clear_table_connection.rollback()
self.__lgr.error(traceback.format_exc())
return False
finally:
clear_table_connection.close()
def insert_metrics_to_db(self, metrics):
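# Opens one connection per call and dispatches every metric family present in the payload
# (vip, bucketed, server vip, url, session, error and memory metrics) to its insert helper.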
try:
db_connection = connect(dbname=self.__config['db'], user=self.__config['user'], \
password=self.__config['password'], port=self.__config['port'])
ts = metrics['ts']
host_ip = metrics['host_ip']
#CLIENT VIP METRICS
metric_json = metrics.get('vip_metrics', {})
if(bool(metric_json)):
self.__insert_vip_metrics(db_connection, ts, host_ip, metric_json)
metric_json = metrics.get('ses_bucket_metrics', {})
if(bool(metric_json)):
self.__insert_vip_metrics(db_connection, ts, host_ip, metric_json, True)
#SERVER VIP METRICS
metric_json = metrics.get('server_vip_metrics', {})
if(bool(metric_json)):
self.__insert_server_vip_metrics(db_connection, ts, host_ip, metric_json)
#URL METRICS
metric_json = metrics.get('url_metrics', {})
if(bool(metric_json)):
self.__insert_url_metrics(db_connection, ts, host_ip, metric_json)
metric_json = metrics.get('url_bucket_metrics', {})
if(bool(metric_json)):
self.__insert_url_metrics(db_connection, ts, host_ip, metric_json, True)
#SES METRICS
metric_json = metrics.get('ses_metrics', {})
if(bool(metric_json)):
self.__insert_ses_metrics(db_connection, ts, host_ip, metric_json)
#ERROR METRICS
metric_json = metrics.get('error_metrics', {})
if(bool(metric_json)):
self.__insert_error_metrics(db_connection, ts, host_ip, metric_json)
#MEMORY METRICS
metric_json = metrics.get('memory_metrics', {})
if(bool(metric_json)):
self.__insert_memory_metrics(db_connection, ts, host_ip, metric_json)
except:
self.__lgr.error(traceback.format_exc())
finally:
# Close the per-call connection; connect() may itself have failed, hence the guard.
if 'db_connection' in locals():
db_connection.close()
def insert_configs(self, res_tag, res_hash, res, ses_tag, ses_hash, ses):
conn = connect(dbname=self.__config['db'], user=self.__config['user'], \
password=self.__config['password'], port=self.__config['port'])
try:
check_query = "SELECT EXISTS(SELECT 1 FROM {} WHERE {}='{}')"
result = self.__execute_query(conn, check_query.format("resource_configs", "res_hash", res_hash))
if bool(result[0]) and not(bool(result[0][0])):
self.__insert(conn, "resource_configs", res_hash, "'{}'".format(res_tag), \
"'{}'".format(json.dumps(res)))
elif not(bool(result[0])):
self.__lgr.error("Wrong DB Query")
result = self.__execute_query(conn, check_query.format("session_configs", "ses_hash", ses_hash))
if bool(result[0]) and not(bool(result[0][0])):
self.__insert(conn, "session_configs", ses_hash, "'{}'".format(ses_tag), \
"'{}'".format(json.dumps(ses)))
elif not(bool(result[0])):
self.__lgr.error("Wrong DB Query")
except:
self.__lgr.error(traceback.format_exc())
finally:
conn.close()
def insert_running_configs(self, host_ip, cpu, res_hash, ses_hash, traffic_mode, traffic_profile):
conn = connect(dbname=self.__config['db'], user=self.__config['user'], \
password=self.__config['password'], port=self.__config['port'])
try:
self.__lgr.debug("insert_running_configs Called")
self.__insert(conn, "running_configs", res_hash, ses_hash, "'{}'".format(traffic_mode),
"'{}'".format(traffic_profile), "'{}'".format(host_ip), cpu, \
"'{}'".format(datetime.now().strftime("%Y-%m-%d %H:%M:%S")), "NULL")
except:
self.__lgr.error(traceback.format_exc())
finally:
conn.close()
def update_stop_time_running_configs(self, host_ip, cpu):
conn = connect(dbname=self.__config['db'], user=self.__config['user'], \
password=self.__config['password'], port=self.__config['port'])
try:
self.__lgr.debug("update_stop_time_running_configs Called")
update_query = """UPDATE running_configs SET end_time = {} WHERE
host_ip={} AND cpu={} AND end_time is NULL"""
result = self.__execute_query(conn, update_query.format(
"'{}'".format(datetime.now().strftime("%Y-%m-%d %H:%M:%S")), \
"'{}'".format(host_ip), cpu), False)
if not bool(result):
self.__lgr.error("Wrong DB Query")
except:
self.__lgr.error(traceback.format_exc())
finally:
conn.close()
def update_running_configs(self, host_ip, cpu, res_hash, ses_hash, traffic_mode, traffic_profile):
self.__lgr.debug("update_running_configs Called")
self.update_stop_time_running_configs(host_ip, cpu)
self.insert_running_configs(host_ip, cpu, res_hash, ses_hash, traffic_mode, traffic_profile)
def __query_last_row(self, conn, cmd_last_row_number):
last_row = self.__execute_query(conn, cmd_last_row_number)
for details in last_row:
self.__lgr.debug("LAST ROW={}".format(details))
return details['id']
def __query_db(self, conn, cmd_query, cmd_last_row_number=None):
last_row_id = None
result = None
result = self.__execute_query(conn, cmd_query)
if cmd_last_row_number is not None:
last_row_id = self.__query_last_row(conn, cmd_last_row_number)
return result, last_row_id
def __get_sql_statements(self, mode, table_name, filter_clauses_param):
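# Builds the WHERE clause, the JOIN clause (against resource_configs/session_configs when
# filtering on tags) and the ordered list of group-by keys for a table. As a hypothetical
# example, filter_clauses={'vip': ['10.0.0.1'], 'ts_range': ('2021-01-01', None)} on
# tcp_client_vip_metrics (outside LAST_DIFF mode) would produce roughly
#   WHERE (ts_ctrl >= '2021-01-01' ) AND (tcp_client_vip_metrics.vip = '10.0.0.1' )
# with an empty join statement and keys_list == ['vip'].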
try:
where_clause = OrderedDict()
custom_keys = []
keys_list = []
filter_clauses = copy(filter_clauses_param)
self.__lgr.debug("%s filter_clauses => %s" %(table_name, str(filter_clauses)))
#Doesn't make sense to filter on both res_tag and res_hash at once
#Similar case with ses_tag and ses_hash
is_res_hash_filter_present = filter_clauses.get('res_hash',None)
is_res_tag_filter_present = filter_clauses.get('res_tag',None)
if is_res_hash_filter_present is not None and is_res_tag_filter_present is not None:
return False, "Not possible to filter on both res_hash and res_tag at once"
is_ses_hash_filter_present = filter_clauses.get('ses_hash',None)
is_ses_tag_filter_present = filter_clauses.get('ses_tag',None)
if is_ses_hash_filter_present is not None and is_ses_tag_filter_present is not None:
return False, "Not possible to filter on both ses_hash and ses_tag at once"
if(filter_clauses.get('ts_range', None) is not None):
start_appended = False
if(bool(filter_clauses['ts_range'])):
if(len(filter_clauses['ts_range']) != 2):
return False, "ts_range filter must possess start and end timestamps"
start, end = filter_clauses['ts_range']
if(start is not None):
where_clause['ts_range'] = "ts_ctrl >= '%s' " %start
start_appended = True
if(end is not None):
if(start_appended):
where_clause['ts_range'] += "AND ts_ctrl <= '%s' " %end
else:
where_clause['ts_range'] = "ts_ctrl <= '%s' " %end
filter_clauses.pop('ts_range', None)
for key, value in filter_clauses.items():
if value is None:
continue
if not isinstance(value, list):
return False, "%s filter must be a list" %key
custom_keys.append(key)
if(bool(value)):
where_clause[key] = ''
counter = 0
length = len(value)
for val in value:
if key == 'res_tag':
where_clause[key] += "resource_configs.%s = '%s' " %(key, str(val))
elif key == 'ses_tag':
where_clause[key] += "session_configs.%s = '%s' " %(key, str(val))
else:
where_clause[key] += "%s.%s = '%s' " %(table_name, key, str(val))
counter += 1
if(counter != length):
where_clause[key] += "OR "
join_statement = ''
if(filter_clauses.get('res_tag') is not None):
join_statement += \
"INNER JOIN resource_configs ON resource_configs.res_hash = %s.res_hash " %table_name
if(filter_clauses.get('ses_tag') is not None):
join_statement += \
"INNER JOIN session_configs ON session_configs.ses_hash = %s.ses_hash " %table_name
if(mode == "LAST_DIFF"):
where_clause['row_number'] = "id > %d " %self.__last_read_row[table_name]
if(bool(where_clause.values())):
where_statement = "WHERE (%s)" %(") AND (".join(where_clause.values()))
else:
where_statement = ''
for key in self.__ORDER_OF_GROUP_BY:
if(key in custom_keys or key in self.__default_select_keys[table_name]):
keys_list.append(key)
return True, (where_statement, join_statement, keys_list)
except:
return False, traceback.format_exc()
def __get_ses_metrics(self, db_connection, mode, table_name, filter_clauses_param):
status, statements = self.__get_sql_statements(mode, table_name, filter_clauses_param)
if(not(status)):
return False, statements
where_statement, join_statement, keys_list = statements
if(bool(keys_list)):
select_keys = ", ".join(keys_list)
group_by_statement = "GROUP BY %s" %select_keys
select_keys += ","
else:
group_by_statement = ""
select_keys = ""
sql_query = "SELECT %s %s FROM %s %s %s %s" \
%(select_keys, self.__metric_keys_as_csv[table_name], table_name, join_statement, where_statement,
group_by_statement)
sql_last_line = "SELECT max(id) AS id FROM %s;" %table_name
self.__lgr.debug("%s cmd='%s'" %(table_name, sql_query))
result, last_row = self.__query_db(db_connection, sql_query, sql_last_line)
if(result is None):
self.__lgr.error("TE_METRICS Unable to get result of query")
return False, "Got None during Query"
if(mode == "LAST_DIFF"):
if(last_row is None):
self.__lgr.error("TE_METRICS Unable to get last row id")
return False, "Got None as the last row ID"
else:
self.__last_read_row[table_name] = last_row
self.__lgr.debug("Last read row for %s is %d"
%(table_name, self.__last_read_row[table_name]))
return True, (keys_list, result)
def __get_vip_metrics(self, db_connection, mode, vip_table_name, url_table_name, filter_clauses_param):
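# Returns vip-level counters and, for client tables, latency statistics aggregated from the
# matching url-level table. The two halves are built as CTEs (vip_stats and url_stats) and
# joined on vip; for the UDP server there is no url table, so only the vip query runs.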
url_keys_list = None
if(bool(url_table_name)):
status, statements = self.__get_sql_statements(mode, url_table_name, filter_clauses_param)
if(not(status)):
return False, statements
url_where_statement, url_join_statement, url_keys_list = statements
if(bool(url_keys_list)):
url_select_keys = []
url_join_statement_internal = []
for i in url_keys_list:
if i == "res_tag":
url_select_keys.append("resource_configs.res_tag")
elif i == "ses_tag":
url_select_keys.append("session_configs.ses_tag")
else:
url_select_keys.append("%s.%s" %(url_table_name, i))
url_join_statement_internal.append("%s.%s = temp.%s" %(url_table_name, i, i))
url_select_keys = ", ".join(url_select_keys)
url_group_by_statement = "GROUP BY %s" %url_select_keys
url_select_keys += ","
url_join_statement_internal = " AND ".join(url_join_statement_internal)
else:
url_group_by_statement = ""
url_select_keys = ""
url_join_statement_internal = ""
#Popping is necessary as `vip_metrics` table doesn't have those key fields
filter_clauses_param.pop('uri', None)
filter_clauses_param.pop('method', None)
status, statements = self.__get_sql_statements(mode, vip_table_name, filter_clauses_param)
if(not(status)):
return False, statements
vip_where_statement, vip_join_statement, vip_keys_list = statements
if(bool(vip_keys_list)):
vip_select_keys = ", ".join(vip_keys_list)
vip_group_by_statement = "GROUP BY %s" %vip_select_keys
vip_select_keys += ","
else:
vip_group_by_statement = ""
vip_select_keys = ""
if(bool(url_table_name)):
# URL metrics are not available for the UDP server,
# in which case url_table_name is None
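# The temp CTE first computes the request-weighted overall mean latency per group,
#   net_mean = sum(mean_latency * resp_rcvd) / sum(resp_rcvd),
# and url_stats then combines the per-row means and variances into one mean and a pooled
# standard deviation, sd = sqrt(sum(n_i * ((mean_i - net_mean)^2 + var_i)) / sum(n_i)),
# i.e. the law of total variance applied to the stored per-row moments.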
sql_query = """
WITH vip_stats AS (
SELECT {} {} FROM {} {} {} {}
), url_stats AS (
WITH temp AS (
SELECT {}
sum(mean_latency * resp_rcvd) / NULLIF(sum(resp_rcvd), 0) AS net_mean
FROM {} {} {} {})
SELECT {} {},
sum(mean_latency * resp_rcvd) / NULLIF(sum(resp_rcvd), 0) AS mean_latency,
sqrt(sum(resp_rcvd * (power(mean_latency - temp.net_mean, 2) + var_latency)) /
NULLIF(sum(resp_rcvd), 0)) AS sd_latency
FROM {}
INNER JOIN temp ON {}
{} {} {}
) SELECT * from url_stats INNER JOIN vip_stats ON url_stats.vip = vip_stats.vip;
""".format(vip_select_keys, self.__metric_keys_as_csv[vip_table_name], vip_table_name, \
vip_join_statement, vip_where_statement, vip_group_by_statement,
url_select_keys, url_table_name, url_join_statement, url_where_statement, url_group_by_statement, \
url_select_keys, self.__metric_keys_as_csv[url_table_name], url_table_name, \
url_join_statement_internal,
url_join_statement, url_where_statement, url_group_by_statement)
else:
sql_query = """SELECT {} {} FROM {} {} {} {}""".format(vip_select_keys, \
self.__metric_keys_as_csv[vip_table_name], vip_table_name, \
vip_join_statement, vip_where_statement, vip_group_by_statement)
self.__lgr.debug("SQL command to get vip metrics={}".format(sql_query))
result, last_row = self.__query_db(db_connection, sql_query)
if(result is None):
self.__lgr.error("TE_METRICS Unable to get result of query")
return False, "Got None during Query"
if(mode == "LAST_DIFF"):
if(bool(url_table_name)):
# url_table_name is None for the UDP server (it has no url metrics table),
# so its last-row query is skipped
sql_last_line = "SELECT max(id) AS id FROM %s;" %url_table_name
last_row = self.__query_last_row(db_connection, sql_last_line)
if(last_row is None):
self.__lgr.error("TE_METRICS Unable to get last row id for {}",format(url_table_name))
return False, "Got None as the last row ID"
else:
self.__last_read_row[url_table_name] = last_row
self.__lgr.debug("Last read row for %s is %d"
%(url_table_name, self.__last_read_row[url_table_name]))
sql_last_line = "SELECT max(id) AS id FROM %s;" %vip_table_name
last_row = self.__query_last_row(db_connection, sql_last_line)
self.__lgr.debug("%s cmd='%s'" %(vip_table_name, sql_query))
if(last_row is None):
self.__lgr.error("TE_METRICS Unable to get last row id for {}",format(vip_table_name))
return False, "Got None as the last row ID"
else:
self.__last_read_row[vip_table_name] = last_row
self.__lgr.debug("Last read row for %s is %d"
%(vip_table_name, self.__last_read_row[vip_table_name]))
return True, (vip_keys_list, url_keys_list, result)
def __get_error_metrics(self, db_connection, mode, filter_clauses_param, error_group_interval):
table_name = "error_metrics"
status, statements = self.__get_sql_statements(mode, table_name, filter_clauses_param)
if(not(status)):
return False, statements
where_statement, join_statement, keys_list = statements
select_group_keys = ", ".join(keys_list)
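# Gaps-and-islands grouping of error ranges: CTE a tracks the running maximum end of the
# TSRANGEs, b marks a new island when the previous end lies more than error_group_interval
# seconds before the current start (the "OR NULL" trick lets count(step) skip non-breaks),
# and c numbers the islands so the final SELECT can sum counters per contiguous burst.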
sql_query = \
"WITH \
a AS ( \
SELECT %s, counter, ts_range \
, COALESCE(lower(ts_range),'-infinity') AS startdate \
, max(COALESCE(upper(ts_range), 'infinity')) OVER (ORDER BY ts_range) AS enddate \
FROM %s %s %s), \
b AS( \
SELECT *, lag(enddate) OVER (ORDER BY ts_range) < (startdate - (%d * interval '1 second')) \
OR NULL AS step FROM a), \
c AS ( \
SELECT *, count(step) OVER (ORDER BY ts_range) AS grp FROM b) \
SELECT %s, sum(counter), min(startdate) AS start_date, max(enddate) AS end_date FROM c\
GROUP BY %s, grp ORDER BY start_date;" \
%(select_group_keys, table_name, join_statement, where_statement, error_group_interval,
select_group_keys, select_group_keys)
sql_last_line = "SELECT max(id) AS id FROM %s;" %table_name
self.__lgr.debug("%s cmd='%s'" %(table_name, sql_query))
result, last_row = self.__query_db(db_connection, sql_query, sql_last_line)
if(result is None):
self.__lgr.error("TE_METRICS Unable to get result of query")
return False, "Got None during Query"
if(mode == "LAST_DIFF"):
if(last_row is None):
self.__lgr.error("TE_METRICS Unable to get last row id")
return False, "Got None as the last row ID"
else:
self.__last_read_row[table_name] = last_row
self.__lgr.debug("Last read row for %s is %d"
%(table_name, self.__last_read_row[table_name]))
return True, (keys_list, result)
def __get_memory_metrics(self, db_connection, mode, filter_clauses_param):
table_name = "memory_metrics"
status, statements = self.__get_sql_statements(mode, table_name, filter_clauses_param)
if(not(status)):
return False, statements
where_statement, join_statement, keys_list = statements
select_group_keys = ", ".join(keys_list)
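# The malloc/free INTEGER[] columns are expanded per element with generate_subscripts and
# joined back on id, so the SUMs aggregate each array position across rows, grouped by the
# selected keys (which include the array index).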
sql_query = \
"SELECT %s, SUM(malloc[index]) AS malloc, SUM(free[index]) AS free FROM memory_metrics \
JOIN (select generate_subscripts(malloc, 1) AS index, id AS iter FROM memory_metrics) AS temp \
ON temp.iter = memory_metrics.id %s %s \
GROUP BY %s;" \
%(select_group_keys, join_statement, where_statement, select_group_keys)
sql_last_line = "SELECT max(id) AS id FROM %s;" %table_name
self.__lgr.debug("%s cmd='%s'" %(table_name, sql_query))
result, last_row = self.__query_db(db_connection, sql_query, sql_last_line)
if(result is None):
self.__lgr.error("TE_METRICS Unable to get result of query")
return False, "Got None during Query"
if(mode == "LAST_DIFF"):
if(last_row is None):
self.__lgr.error("TE_METRICS Unable to get last row id")
return False, "Got None as the last row ID"
else:
self.__last_read_row[table_name] = last_row
self.__lgr.debug("Last read row for %s is %d"
%(table_name, self.__last_read_row[table_name]))
return True, (keys_list, result)
def __get_latency_percentile(self, metric_json):
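# Only the mean and standard deviation of latency are aggregated in SQL, so percentiles are
# approximated by sampling a normal distribution with those parameters; this assumes the
# latencies are roughly normal and is only an estimate.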
mean_lat = metric_json.pop("mean_latency", None)
sd_lat = metric_json.pop("sd_latency", None)
mean_lat = None if mean_lat == 'None' else mean_lat
sd_lat = None if sd_lat == 'None' else sd_lat
if bool(mean_lat) and bool(sd_lat):
mean_lat = float(mean_lat)
sd_lat = float(sd_lat)
values = np.random.normal(mean_lat, sd_lat, 10000)
metric_json['latency_mean'] = round(mean_lat, 5)
metric_json['latency_sd'] = round(sd_lat, 5)
metric_json['latency_p10'] = round(np.percentile(values, 10), 5)
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import ctypes
import numpy as np
from onnx.backend.test.case.node import _extract_value_info
import onnx
from onnx import TensorProto, helper, mapping, numpy_helper
import pycuda.driver as cuda
import tensorrt as trt
import tensorflow as tf
sys.path.append("..")
from python import *
os.chdir("../python/")
I_GPU = 0
os.environ["CUDA_VISIBLE_DEVICES"] = str(I_GPU)
tf.set_random_seed(1234)
np.random.seed(0)
ITERATIONS = 10
CONFIG = tf.ConfigProto()
CONFIG.gpu_options.allow_growth = True
INPUT_MODEL_FILE = "model/test_op_plugin.onnx"
OUTPUT_MODEL_FILE = "model/test_op_trt.onnx"
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
# TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE)
BATCH_SIZE = 1
# Simple helper data class that's a little nicer to use than a 2-tuple.
class HostDeviceMem(object):
def __init__(self, host_mem, device_mem):
self.host = host_mem
self.device = device_mem
def __str__(self):
return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)
def __repr__(self):
return self.__str__()
# Allocates all buffers required for an engine, i.e. host/device inputs/outputs.
def allocate_buffers(engine):
inputs = []
outputs = []
bindings = []
stream = cuda.Stream()
for binding in engine:
# size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
size = trt.volume(engine.get_binding_shape(binding))
dtype = trt.nptype(engine.get_binding_dtype(binding))
# Allocate host and device buffers
host_mem = cuda.pagelocked_empty(size, dtype)
device_mem = cuda.mem_alloc(host_mem.nbytes)
# Append the device buffer to device bindings.
bindings.append(int(device_mem))
# Append to the appropriate list.
if engine.binding_is_input(binding):
inputs.append(HostDeviceMem(host_mem, device_mem))
else:
outputs.append(HostDeviceMem(host_mem, device_mem))
return inputs, outputs, bindings, stream
# This function is generalized for multiple inputs/outputs.
# inputs and outputs are expected to be lists of HostDeviceMem objects.
def do_inference(context, bindings, inputs, outputs, stream, batch_size=1):
# Transfer input data to the GPU.
[cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
# Run inference.
context.execute_async(
batch_size=batch_size, bindings=bindings, stream_handle=stream.handle
)
# Transfer predictions back from the GPU.
[cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
# Synchronize the stream
stream.synchronize()
# Return only the host outputs.
return [out.host for out in outputs]
def convert_to_list(x):
if not isinstance(x, list):
x = [x]
return x
def run_tf_graph(sess, input_data, input_node, output_node):
"""Generic function to execute tensorflow"""
input_data = convert_to_list(input_data)
input_node = convert_to_list(input_node)
output_node = convert_to_list(output_node)
tensor = [sess.graph.get_tensor_by_name(output_name) for output_name in output_node]
input_dict = {e: input_data[i] for i, e in enumerate(input_node)}
# if len(input_node) == 1 and input_node[0] == "":
# output_data = sess.run(tensor)
# else:
output_data = sess.run(tensor, input_dict)
return output_data
def verify_tf_with_trt_result(in_data, in_name, out_name, op_name):
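# End-to-end check of a TensorFlow op against its TensorRT plugin: run the TF graph, freeze
# it, convert to ONNX with tf2onnx, generate the plugin with onnx2plugin and load the
# resulting .so, build a TensorRT engine from the converted model, run inference and compare.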
def name_without_num(name):
return name.split(":")[0] if ":" in name else name
out_name = convert_to_list(out_name)
out_node = [name_without_num(name) for name in out_name]
in_data = convert_to_list(in_data)
in_name = convert_to_list(in_name)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
tf_result = run_tf_graph(sess, in_data, in_name, out_name)
frozen_graph = tf.graph_util.convert_variables_to_constants(
sess, sess.graph_def, out_node
)
with open("model/test_op_{}.pb".format(op_name), "wb") as ofile:
ofile.write(frozen_graph.SerializeToString())
os.system(
"python3 -m tf2onnx.convert --input model/test_op_{}.pb --inputs {} --outputs {} --output {} --opset 11".format(
op_name, str(",").join(in_name), str(",").join(out_name), INPUT_MODEL_FILE
)
)
ops_name = [op_name]
trt_plugin_name = onnx2plugin(
INPUT_MODEL_FILE, OUTPUT_MODEL_FILE, node_names=ops_name
)
for plugin_name in trt_plugin_name:
ctypes.cdll.LoadLibrary("./trt_plugin/lib/{}.so".format(plugin_name))
cuda.Device(0).make_context()
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(
1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
) as network, trt.OnnxParser(network, TRT_LOGGER) as parser:
builder.max_batch_size = BATCH_SIZE
builder_config = builder.create_builder_config()
builder_config.max_workspace_size = 1 << 30
with open(OUTPUT_MODEL_FILE, "rb") as model:
# parse onnx model
parser.parse(model.read())
for i in range(parser.num_errors):
print(parser.get_error(i))
engine = builder.build_engine(network, builder_config)
if engine is None:
print("[ERROR] engine is None")
exit(-1)
inputs, outputs, bindings, stream = allocate_buffers(engine)
with engine.create_execution_context() as context:
for i in range(len(inputs)):
input_data = in_data[i].ravel()
np.copyto(inputs[i].host, input_data)
trt_result = do_inference(
context,
bindings=bindings,
inputs=inputs,
outputs=outputs,
stream=stream,
batch_size=BATCH_SIZE,
)
cuda.Context.pop()
ret = True
if len(trt_result) == 1:
ret = compare_tf_trt_result(tf_result, trt_result)
else:
for i in range(len(trt_result)):
ret &= compare_tf_trt_result(tf_result[i], trt_result[i])
assert ret, "result check False"
return ret
def compare_tf_trt_result(tf_result, trt_result):
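# Flattens both results and compares them with np.allclose at atol=1e-5; if both sides are
# NaN (or both Inf), the comparison is treated as a pass.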
print(tf_result)
print("================")
print(trt_result)
tf_reshape = np.array(tf_result).reshape(-1)
trt_reshape = np.array(trt_result).reshape(-1)
if (
isinstance(tf_result, list)
and isinstance(trt_result, list)
and len(tf_result) > 0
and len(trt_result) > 0
and np.isnan(tf_result[0]).any()
and np.isnan(trt_result[0]).any()
):
return True
elif (
isinstance(tf_result, list)
and isinstance(trt_result, list)
and len(tf_result) > 0
and len(trt_result) > 0
and np.isinf(tf_result[0]).any()
and np.isinf(trt_result[0]).any()
):
return True
print(
"trt cross_check output ",
str(np.allclose(tf_reshape.flatten(), trt_reshape.flatten(), atol=1e-5)),
flush=True,
)
return bool(np.allclose(tf_reshape.flatten(), trt_reshape.flatten(), atol=1e-5))
def get_onnxruntime_output(model, inputs):
import onnxruntime.backend
rep = onnxruntime.backend.prepare(model, "CPU")
if isinstance(inputs, list) and len(inputs) == 1:
inp = inputs[0]
else:
inp = inputs
output = rep.run(inp)
# Unpack output if there's only a single value.
if len(output) == 1:
output = output[0]
return output
def verify_with_ort_with_trt(
model,
inputs,
op_name,
opset=None,
dtype="float32",
opt_level=1,
np_result=None,
use_vm=False,
):
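# Same flow as verify_tf_with_trt_result, but the reference output comes from onnxruntime
# (or an explicitly supplied np_result) instead of TensorFlow.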
if opset is not None:
model.opset_import[0].version = opset
onnx.save(model, INPUT_MODEL_FILE)
if np_result is None:
ort_result = get_onnxruntime_output(model, inputs)
else:
ort_result = np_result
in_data = convert_to_list(inputs)
ops_name = [op_name]
trt_plugin_name = onnx2plugin(
INPUT_MODEL_FILE, OUTPUT_MODEL_FILE, node_names=ops_name
)
for plugin_name in trt_plugin_name:
ctypes.cdll.LoadLibrary("./trt_plugin/lib/{}.so".format(plugin_name))
cuda.Device(0).make_context()
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(
1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
) as network, trt.OnnxParser(network, TRT_LOGGER) as parser:
builder.max_batch_size = BATCH_SIZE
builder_config = builder.create_builder_config()
builder_config.max_workspace_size = 1 << 30
with open(OUTPUT_MODEL_FILE, "rb") as model:
# parse onnx model
parser.parse(model.read())
for i in range(parser.num_errors):
print(parser.get_error(i))
engine = builder.build_engine(network, builder_config)
if engine is None:
print("[ERROR] engine is None")
exit(-1)
inputs, outputs, bindings, stream = allocate_buffers(engine)
with engine.create_execution_context() as context:
for i in range(len(inputs)):
input_data = in_data[i].ravel()
np.copyto(inputs[i].host, input_data)
trt_result = do_inference(
context,
bindings=bindings,
inputs=inputs,
outputs=outputs,
stream=stream,
batch_size=BATCH_SIZE,
)
cuda.Context.pop()
ret = True
if len(trt_result) == 1:
ret = compare_tf_trt_result(ort_result, trt_result)
else:
for i in range(len(trt_result)):
ret &= compare_tf_trt_result(ort_result[i], trt_result[i])
assert ret, "result check False"
return ret
def make_constant_node(name, data_type, dims, vals):
return helper.make_node(
"Constant",
inputs=[],
outputs=[name],
value=helper.make_tensor(name=name, data_type=data_type, dims=dims, vals=vals),
)
def make_onnx_model(node, inputs, outputs, name, **kwargs):
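# Builds a single-node ONNX model: value infos for the node's inputs/outputs are inferred
# from the example arrays via _extract_value_info, with optional explicit type protos.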
present_inputs = [x for x in node.input if (x != "")]
present_outputs = [x for x in node.output if (x != "")]
input_type_protos = [None] * len(inputs)
if "input_type_protos" in kwargs:
input_type_protos = kwargs[str("input_type_protos")]
del kwargs[str("input_type_protos")]
output_type_protos = [None] * len(outputs)
if "output_type_protos" in kwargs:
output_type_protos = kwargs[str("output_type_protos")]
del kwargs[str("output_type_protos")]
inputs_vi = [
_extract_value_info(arr, arr_name, input_type)
for arr, arr_name, input_type in zip(inputs, present_inputs, input_type_protos)
]
outputs_vi = [
_extract_value_info(arr, arr_name, output_type)
for arr, arr_name, output_type in zip(
outputs, present_outputs, output_type_protos
)
]
graph = helper.make_graph(
nodes=[node], name=name, inputs=inputs_vi, outputs=outputs_vi
)
kwargs[str("producer_name")] = "TRTPluginAutoGen-test"
model = onnx.helper.make_model(graph, **kwargs)
return model
def op_expect(node, inputs, outputs, op_type, op_name, np_result=None):
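# Convenience wrapper used by the unit tests below: wraps `node` in a one-node model and
# checks the TensorRT plugin output against onnxruntime (or np_result when given).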
model = make_onnx_model(
node, inputs=inputs, outputs=outputs, name="test_{}".format(op_type)
)
verify_with_ort_with_trt(model, inputs, op_name, np_result=np_result)
# ====================================================================================
# ---UnitTest
# ====================================================================================
def test_abs():
op_name = "abs_0"
op_type = "Abs"
x = np.random.randn(3, 4, 5).astype(np.float32)
y = abs(x)
node = helper.make_node(op_type, inputs=["x"], outputs=["y"], name=op_name)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_acos():
op_name = "acos_0"
op_type = "Acos"
node = onnx.helper.make_node("Acos", inputs=["x"], outputs=["y"], name=op_name)
x = np.array([-0.5, 0, 0.5]).astype(np.float32)
y = np.arccos(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "acos_1"
op_type = "Acos"
node = onnx.helper.make_node("Acos", inputs=["x"], outputs=["y"], name=op_name)
x = np.random.rand(3, 4, 5).astype(np.float32)
y = np.arccos(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_and():
op_name = "and_0"
op_type = "And"
node = onnx.helper.make_node(
"And", inputs=["x", "y"], outputs=["and"], name=op_name
)
# 2d
x = (np.random.randn(3, 4) > 0).astype(bool)
y = (np.random.randn(3, 4) > 0).astype(bool)
z = np.logical_and(x, y)
op_expect(node, inputs=[x, y], outputs=[z], op_type=op_type, op_name=op_name)
op_name = "and_1"
op_type = "And"
node = onnx.helper.make_node(
"And", inputs=["x", "y"], outputs=["and"], name=op_name
)
x = (np.random.randn(3, 4, 5) > 0).astype(bool)
y = (np.random.randn(3, 4, 5) > 0).astype(bool)
z = np.logical_and(x, y)
op_expect(node, inputs=[x, y], outputs=[z], op_type=op_type, op_name=op_name)
op_name = "and_2"
op_type = "And"
node = onnx.helper.make_node(
"And", inputs=["x", "y"], outputs=["and"], name=op_name
)
x = (np.random.randn(3, 4, 5, 6) > 0).astype(bool)
y = (np.random.randn(3, 4, 5, 6) > 0).astype(bool)
z = np.logical_and(x, y)
op_expect(node, inputs=[x, y], outputs=[z], op_type=op_type, op_name=op_name)
def test_add():
op_name = "add_0"
op_type = "Add"
node = onnx.helper.make_node(
"Add", inputs=["x", "y"], outputs=["sum"], name=op_name
)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.random.randn(3, 4, 5).astype(np.float32)
op_expect(node, inputs=[x, y], outputs=[x + y], op_type=op_type, op_name=op_name)
op_name = "add_1"
op_type = "Add"
node = onnx.helper.make_node(
"Add", inputs=["x", "y"], outputs=["sum"], name=op_name
)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.random.randn(5).astype(np.float32)
op_expect(node, inputs=[x, y], outputs=[x + y], op_type=op_type, op_name=op_name)
def test_argmax():
op_type = "ArgMax"
op_name = "argmax_0"
data = np.array([[2, 1, 3, 10], [3, 4, 5, 6]], dtype=np.float32)
keepdims = 1
axis = -1
node = onnx.helper.make_node(
"ArgMax",
inputs=["data"],
outputs=["result"],
keepdims=keepdims,
axis=axis,
name=op_name,
)
# result: [[1], [1]]
from onnx.backend.test.case.node.argmax import argmax_use_numpy
result = argmax_use_numpy(data, keepdims=keepdims, axis=axis)
op_expect(node, inputs=[data], outputs=[result], op_type=op_type, op_name=op_name)
op_name = "argmax_1"
node = onnx.helper.make_node(
"ArgMax",
inputs=["data"],
outputs=["result"],
keepdims=keepdims,
axis=axis,
name=op_name,
)
data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)
# result's shape: [1, 3, 4]
result = argmax_use_numpy(data, keepdims=keepdims, axis=axis)
op_expect(node, inputs=[data], outputs=[result], op_type=op_type, op_name=op_name)
def test_argmin():
op_type = "ArgMin"
op_name = "argmin_0"
data = np.array([[2, 1], [3, 10]], dtype=np.float32)
keepdims = 1
axis = 1
node = onnx.helper.make_node(
"ArgMin",
inputs=["data"],
outputs=["result"],
keepdims=keepdims,
axis=axis,
name=op_name,
)
# result: [[1], [1]]
from onnx.backend.test.case.node.argmin import argmin_use_numpy
result = argmin_use_numpy(data, keepdims=keepdims, axis=axis)
op_expect(node, inputs=[data], outputs=[result], op_type=op_type, op_name=op_name)
def test_asin():
op_name = "asin_0"
op_type = "Asin"
node = onnx.helper.make_node("Asin", inputs=["x"], outputs=["y"], name=op_name)
x = np.array([-0.5, 0, 0.5]).astype(np.float32)
y = np.arcsin(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "asin_1"
op_type = "Asin"
node = onnx.helper.make_node("Asin", inputs=["x"], outputs=["y"], name=op_name)
x = np.random.rand(3, 4, 5).astype(np.float32)
y = np.arcsin(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_asinh():
op_name = "asinh_0"
op_type = "Asinh"
node = onnx.helper.make_node("Asinh", inputs=["x"], outputs=["y"], name=op_name)
x = np.array([-1, 0, 1]).astype(np.float32)
y = np.arcsinh(x) # expected output [-0.88137358, 0., 0.88137358]
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "asinh_1"
op_type = "Asinh"
node = onnx.helper.make_node("Asinh", inputs=["x"], outputs=["y"], name=op_name)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.arcsinh(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_atan():
op_type = "Atan"
op_name = "atan_0"
node = onnx.helper.make_node("Atan", inputs=["x"], outputs=["y"], name=op_name)
x = np.array([-1, 0, 1]).astype(np.float32)
y = np.arctan(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_type = "Atan"
op_name = "atan_1"
node = onnx.helper.make_node("Atan", inputs=["x"], outputs=["y"], name=op_name)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.arctan(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_atanh():
op_name = "atanh_0"
op_type = "Atanh"
node = onnx.helper.make_node("Atanh", inputs=["x"], outputs=["y"], name=op_name)
x = np.array([-0.5, 0, 0.5]).astype(np.float32)
y = np.arctanh(x) # expected output [-0.54930615, 0., 0.54930615]
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "atanh_1"
op_type = "Atanh"
node = onnx.helper.make_node("Atanh", inputs=["x"], outputs=["y"], name=op_name)
x = np.random.uniform(0.0, 1.0, (3, 4, 5)).astype(np.float32)
y = np.arctanh(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_averagepool():
op_name = "averagepool_1d_default"
op_type = "AveragePool"
"""
input_shape: [1, 3, 32]
output_shape: [1, 3, 31]
"""
node = onnx.helper.make_node(
"AveragePool", inputs=["x"], outputs=["y"], kernel_shape=[2], name=op_name
)
x = np.random.randn(1, 3, 32).astype(np.float32)
x_shape = np.shape(x)
kernel_shape = [2]
strides = [1]
from onnx.backend.test.case.node.pool_op_common import get_output_shape, pool
out_shape = get_output_shape("VALID", x_shape[2:], kernel_shape, strides)
padded = x
y = pool(padded, x_shape, kernel_shape, strides, out_shape, [0], "AVG")
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "averagepool_2d_ceil"
op_type = "AveragePool"
node = onnx.helper.make_node(
"AveragePool",
inputs=["x"],
outputs=["y"],
kernel_shape=[3, 3],
strides=[2, 2],
ceil_mode=True,
name=op_name,
)
x = np.array(
[
[
[
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16],
]
]
]
).astype(np.float32)
y = np.array([[[[6, 7.5], [12, 13.5]]]]).astype(np.float32)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_batchnormalization():
op_name = "batchnormalization_0"
op_type = "BatchNormalization"
# input size: (2, 3, 4, 5)
x = np.random.randn(2, 3, 4, 5).astype(np.float32)
s = np.random.randn(3).astype(np.float32)
bias = np.random.randn(3).astype(np.float32)
mean = np.random.randn(3).astype(np.float32)
var = np.random.rand(3).astype(np.float32)
from onnx.backend.test.case.node.batchnorm import _batchnorm_test_mode
y = _batchnorm_test_mode(x, s, bias, mean, var).astype(np.float32)
node = onnx.helper.make_node(
"BatchNormalization",
inputs=["x", "s", "bias", "mean", "var"],
outputs=["y"],
name=op_name,
)
# output size: (2, 3, 4, 5)
op_expect(
node,
inputs=[x, s, bias, mean, var],
outputs=[y],
op_type=op_type,
op_name=op_name,
)
def test_ceil():
op_name = "ceil_0"
op_type = "Ceil"
node = onnx.helper.make_node("Ceil", inputs=["x"], outputs=["y"], name=op_name)
x = np.array([-1.5, 1.2]).astype(np.float32)
y = np.ceil(x) # expected output [-1., 2.]
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "ceil_1"
op_type = "Ceil"
node = onnx.helper.make_node("Ceil", inputs=["x"], outputs=["y"], name=op_name)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.ceil(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_celu():
op_name = "celu_0"
op_type = "Celu"
alpha = 2.0
node = onnx.helper.make_node(
"Celu", inputs=["X"], outputs=["Y"], alpha=alpha, name=op_name
)
input_data = np.array(
[
[
[[0.8439683], [0.5665144], [0.05836735]],
[[0.02916367], [0.12964272], [0.5060197]],
[[0.79538304], [0.9411346], [0.9546573]],
],
[
[[0.17730942], [0.46192095], [0.26480448]],
[[0.6746842], [0.01665257], [0.62473077]],
[[0.9240844], [0.9722341], [0.11965699]],
],
[
[[0.41356155], [0.9129373], [0.59330076]],
[[0.81929934], [0.7862604], [0.11799799]],
[[0.69248444], [0.54119414], [0.07513223]],
],
],
dtype=np.float32,
)
# Calculate expected output data
positive_input = np.maximum(0, input_data)
negative_input = np.minimum(0, alpha * (np.exp(input_data / alpha) - 1))
expected_output = positive_input + negative_input
op_expect(
node,
inputs=[input_data],
outputs=[expected_output],
op_type=op_type,
op_name=op_name,
)
def test_clip():
op_name = "Clip_0"
op_type = "Clip"
node = onnx.helper.make_node(
"Clip", inputs=["x", "min", "max"], outputs=["y"], name=op_name
)
x = np.array([-2, 0, 2]).astype(np.float32)
min_val = np.array([-1.0]).astype(np.float32) # .float32(-1.0)
max_val = np.array([1.0]).astype(np.float32) # .float32(1.0)
y = np.clip(x, min_val, max_val) # expected output [-1., 0., 1.]
op_expect(
node,
inputs=[x, min_val, max_val],
outputs=[y],
op_type=op_type,
op_name=op_name,
)
op_name = "Clip_1"
op_type = "Clip"
node = onnx.helper.make_node(
"Clip", inputs=["x", "min", "max"], outputs=["y"], name=op_name
)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.clip(x, min_val, max_val)
op_expect(
node,
inputs=[x, min_val, max_val],
outputs=[y],
op_type=op_type,
op_name=op_name,
)
op_name = "Clip_2"
op_type = "Clip"
node = onnx.helper.make_node(
"Clip", inputs=["x", "min", "max"], outputs=["y"], name=op_name
)
min_val = np.array([-5.0]).astype(np.float32) # .float32(-1.0)
max_val = np.array([5.0]).astype(np.float32) # .float32(1.0)
op_name = "Clip_3"
op_type = "Clip"
node = onnx.helper.make_node(
"Clip", inputs=["x", "min", "max"], outputs=["y"], name=op_name
)
x = np.array([-1, 0, 1]).astype(np.float32)
y = np.array([-1, 0, 1]).astype(np.float32)
op_expect(
node,
inputs=[x, min_val, max_val],
outputs=[y],
op_type=op_type,
op_name=op_name,
)
op_name = "Clip_4"
op_type = "Clip"
node = onnx.helper.make_node(
"Clip", inputs=["x", "min", "max"], outputs=["y"], name=op_name
)
x = np.array([-6, 0, 6]).astype(np.float32)
y = np.array([-5, 0, 5]).astype(np.float32)
op_expect(
node,
inputs=[x, min_val, max_val],
outputs=[y],
op_type=op_type,
op_name=op_name,
)
op_name = "Clip_5"
op_type = "Clip"
node = onnx.helper.make_node(
"Clip", inputs=["x", "min", "max"], outputs=["y"], name=op_name
)
x = np.array([-1, 0, 6]).astype(np.float32)
y = np.array([-1, 0, 5]).astype(np.float32)
op_expect(
node,
inputs=[x, min_val, max_val],
outputs=[y],
op_type=op_type,
op_name=op_name,
)
def test_concat():
test_cases = {
"1d": ([1, 2], [3, 4]),
"2d": ([[1, 2], [3, 4]], [[5, 6], [7, 8]]),
"3d": (
[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[[9, 10], [11, 12]], [[13, 14], [15, 16]]],
),
} # type: Dict[Text, Sequence[Any]]
for test_case, values_ in test_cases.items():
values = [np.asarray(v, dtype=np.float32) for v in values_]
for i in range(len(values[0].shape)):
op_name = "concat_{}_{}".format(test_case, i)
op_type = "Concat"
in_args = ["value" + str(k) for k in range(len(values))]
node = onnx.helper.make_node(
"Concat",
inputs=[s for s in in_args],
outputs=["output"],
axis=i,
name=op_name,
)
output = np.concatenate(values, i)
op_expect(
node,
inputs=[v for v in values],
outputs=[output],
op_type=op_type,
op_name=op_name,
)
for i in range(-len(values[0].shape), 0):
op_name = "concat_{}_1_{}".format(test_case, abs(i))
op_type = "Concat"
in_args = ["value" + str(k) for k in range(len(values))]
node = onnx.helper.make_node(
"Concat",
inputs=[s for s in in_args],
outputs=["output"],
axis=i,
name=op_name,
)
output = np.concatenate(values, i)
op_expect(
node,
inputs=[v for v in values],
outputs=[output],
op_type=op_type,
op_name=op_name,
)
def test_conv():
# ------Conv
op_name, op_type = "test_basic_conv_with_padding", "Conv"
x = np.array(
[
[
[
[0.0, 1.0, 2.0, 3.0, 4.0], # (1, 1, 5, 5) input tensor
[5.0, 6.0, 7.0, 8.0, 9.0],
[10.0, 11.0, 12.0, 13.0, 14.0],
[15.0, 16.0, 17.0, 18.0, 19.0],
[20.0, 21.0, 22.0, 23.0, 24.0],
]
]
]
).astype(np.float32)
#NOCC:invalid-name(other: onnx example)
W = np.array(
[
[
[
[1.0, 1.0, 1.0], # (1, 1, 3, 3) tensor for convolution weights
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
]
]
]
).astype(np.float32)
# Convolution with padding
node_with_padding = onnx.helper.make_node(
"Conv",
inputs=["x", "W"],
outputs=["y"],
kernel_shape=[3, 3],
# Default values for other attributes: strides=[1, 1], dilations=[1, 1], groups=1
pads=[1, 1, 1, 1],
name=op_name,
)
y_with_padding = np.array(
[
[
[
[12.0, 21.0, 27.0, 33.0, 24.0], # (1, 1, 5, 5) output tensor
[33.0, 54.0, 63.0, 72.0, 51.0],
[63.0, 99.0, 108.0, 117.0, 81.0],
[93.0, 144.0, 153.0, 162.0, 111.0],
[72.0, 111.0, 117.0, 123.0, 84.0],
]
]
]
).astype(np.float32)
op_expect(
node_with_padding,
inputs=[x, W],
outputs=[y_with_padding],
op_type=op_type,
op_name=op_name,
)
op_name, op_type = "test_basic_conv_without_padding", "Conv"
# Convolution without padding
node_without_padding = onnx.helper.make_node(
"Conv",
inputs=["x", "W"],
outputs=["y"],
kernel_shape=[3, 3],
# Default values for other attributes: strides=[1, 1], dilations=[1, 1], groups=1
pads=[0, 0, 0, 0],
name=op_name,
)
y_without_padding = np.array(
[
[
[
[54.0, 63.0, 72.0], # (1, 1, 3, 3) output tensor
[99.0, 108.0, 117.0],
[144.0, 153.0, 162.0],
]
]
]
).astype(np.float32)
op_expect(
node_without_padding,
inputs=[x, W],
outputs=[y_without_padding],
op_type=op_type,
op_name=op_name,
)
# conv_with_autopad_same
op_name, op_type = "test_conv_with_autopad_same", "Conv"
x = np.array(
[
[
[
[0.0, 1.0, 2.0, 3.0, 4.0], # (1, 1, 5, 5) input tensor
[5.0, 6.0, 7.0, 8.0, 9.0],
[10.0, 11.0, 12.0, 13.0, 14.0],
[15.0, 16.0, 17.0, 18.0, 19.0],
[20.0, 21.0, 22.0, 23.0, 24.0],
]
]
]
).astype(np.float32)
#NOCC:invalid-name(other: onnx example)
W = np.array(
[
[
[
[1.0, 1.0, 1.0], # (1, 1, 3, 3) tensor for convolution weights
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
]
]
]
).astype(np.float32)
# Convolution with auto_pad='SAME_LOWER' and strides=2
node = onnx.helper.make_node(
"Conv",
inputs=["x", "W"],
outputs=["y"],
auto_pad="SAME_LOWER",
kernel_shape=[3, 3],
strides=[2, 2],
name=op_name,
)
y = np.array(
[[[[12.0, 27.0, 24.0], [63.0, 108.0, 81.0], [72.0, 117.0, 84.0]]]]
).astype(np.float32)
op_expect(node, inputs=[x, W], outputs=[y], op_type=op_type, op_name=op_name)
# conv_with_strides
op_name, op_type = "test_conv_with_strides_padding", "Conv"
x = np.array(
[
[
[
[0.0, 1.0, 2.0, 3.0, 4.0], # (1, 1, 7, 5) input tensor
[5.0, 6.0, 7.0, 8.0, 9.0],
[10.0, 11.0, 12.0, 13.0, 14.0],
[15.0, 16.0, 17.0, 18.0, 19.0],
[20.0, 21.0, 22.0, 23.0, 24.0],
[25.0, 26.0, 27.0, 28.0, 29.0],
[30.0, 31.0, 32.0, 33.0, 34.0],
]
]
]
).astype(np.float32)
#NOCC:invalid-name(other: onnx example)
W = np.array(
[
[
[
[1.0, 1.0, 1.0], # (1, 1, 3, 3) tensor for convolution weights
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
]
]
]
).astype(np.float32)
# Convolution with strides=2 and padding
node_with_padding = onnx.helper.make_node(
"Conv",
inputs=["x", "W"],
outputs=["y"],
kernel_shape=[3, 3],
pads=[1, 1, 1, 1],
strides=[
2,
2,
], # Default values for other attributes: dilations=[1, 1], groups=1
name=op_name,
)
y_with_padding = np.array(
[
[
[
[12.0, 27.0, 24.0], # (1, 1, 4, 3) output tensor
[63.0, 108.0, 81.0],
[123.0, 198.0, 141.0],
[112.0, 177.0, 124.0],
]
]
]
).astype(np.float32)
op_expect(
node_with_padding,
inputs=[x, W],
outputs=[y_with_padding],
op_type=op_type,
op_name=op_name,
)
op_name = "test_conv_with_strides_no_padding"
# Convolution with strides=2 and no padding
node_without_padding = onnx.helper.make_node(
"Conv",
inputs=["x", "W"],
outputs=["y"],
kernel_shape=[3, 3],
pads=[0, 0, 0, 0],
strides=[
2,
2,
], # Default values for other attributes: dilations=[1, 1], groups=1
name=op_name,
)
y_without_padding = np.array(
[[[[54.0, 72.0], [144.0, 162.0], [234.0, 252.0]]]] # (1, 1, 3, 2) output tensor
).astype(np.float32)
op_expect(
node_without_padding,
inputs=[x, W],
outputs=[y_without_padding],
op_type=op_type,
op_name=op_name,
)
op_name = "test_conv_with_strides_and_asymmetric_padding"
# Convolution with strides=2 and padding only along one dimension (the H dimension in NxCxHxW tensor)
node_with_asymmetric_padding = onnx.helper.make_node(
"Conv",
inputs=["x", "W"],
outputs=["y"],
kernel_shape=[3, 3],
pads=[1, 0, 1, 0],
strides=[
2,
2,
], # Default values for other attributes: dilations=[1, 1], groups=1
name=op_name,
)
y_with_asymmetric_padding = np.array(
[
[
[
[21.0, 33.0], # (1, 1, 4, 2) output tensor
[99.0, 117.0],
[189.0, 207.0],
[171.0, 183.0],
]
]
]
).astype(np.float32)
op_expect(
node_with_asymmetric_padding,
inputs=[x, W],
outputs=[y_with_asymmetric_padding],
op_type=op_type,
op_name=op_name,
)
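    # Note on the expected shapes above: for each spatial dim a Conv output is
    #   out = floor((in + pad_begin + pad_end - kernel) / stride) + 1,
    # so pads=[1, 1, 1, 1] keeps the 5x5 input at 5x5, and strides=[2, 2] with no
    # padding turns the 7x5 input into a 3x2 output.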
def test_convtranspose():
op_name, op_type = "test_convtranspose", "ConvTranspose"
x = np.array(
[[[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]]] # (1, 1, 3, 3)
).astype(np.float32)
    #NOCC:invalid-name(other: onnx example)
W = np.array(
[
[
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], # (1, 2, 3, 3)
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
]
]
).astype(np.float32)
node = onnx.helper.make_node("ConvTranspose", ["X", "W"], ["Y"], name=op_name)
y = np.array(
[
[
[
[0.0, 1.0, 3.0, 3.0, 2.0], # (1, 2, 5, 5)
[3.0, 8.0, 15.0, 12.0, 7.0],
[9.0, 21.0, 36.0, 27.0, 15.0],
[9.0, 20.0, 33.0, 24.0, 13.0],
[6.0, 13.0, 21.0, 15.0, 8.0],
],
[
[0.0, 1.0, 3.0, 3.0, 2.0],
[3.0, 8.0, 15.0, 12.0, 7.0],
[9.0, 21.0, 36.0, 27.0, 15.0],
[9.0, 20.0, 33.0, 24.0, 13.0],
[6.0, 13.0, 21.0, 15.0, 8.0],
],
]
]
).astype(np.float32)
op_expect(node, inputs=[x, W], outputs=[y], op_type=op_type, op_name=op_name)
op_name, op_type = "test_convtranspose_1d", "ConvTranspose"
x = np.array([[[0.0, 1.0, 2.0]]]).astype(np.float32) # (1, 1, 3)
    #NOCC:invalid-name(other: onnx example)
W = np.array([[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]]).astype(np.float32) # (1, 2, 3)
node = onnx.helper.make_node("ConvTranspose", ["X", "W"], ["Y"], name=op_name)
y = np.array(
[[[0.0, 1.0, 3.0, 3.0, 2.0], [0.0, 1.0, 3.0, 3.0, 2.0]]] # (1, 2, 5)
).astype(np.float32)
op_expect(node, inputs=[x, W], outputs=[y], op_type=op_type, op_name=op_name)
op_name, op_type = "test_convtranspose_3d", "ConvTranspose"
x = np.array(
[
[
[
[
[0.0, 1.0, 2.0, 3.0, 4.0], # (1, 1, 3, 4, 5)
[5.0, 6.0, 7.0, 8.0, 9.0],
[10.0, 11.0, 12.0, 13.0, 14.0],
[15.0, 16.0, 17.0, 18.0, 19.0],
],
[
[20.0, 21.0, 22.0, 23.0, 24.0],
[25.0, 26.0, 27.0, 28.0, 29.0],
[30.0, 31.0, 32.0, 33.0, 34.0],
[35.0, 36.0, 37.0, 38.0, 39.0],
],
[
[40.0, 41.0, 42.0, 43.0, 44.0],
[45.0, 46.0, 47.0, 48.0, 49.0],
[50.0, 51.0, 52.0, 53.0, 54.0],
[55.0, 56.0, 57.0, 58.0, 59.0],
],
]
]
]
).astype(np.float32)
    #NOCC:invalid-name(other: onnx example)
W = np.array(
[
[
[
[
[1.0, 1.0, 1.0], # (1, 2, 3, 3, 3)
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
],
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
],
[
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
],
]
]
).astype(np.float32)
node = onnx.helper.make_node("ConvTranspose", ["X", "W"], ["Y"], name=op_name)
y = np.array(
[
[
[
[
[0.0, 1.0, 3.0, 6.0, 9.0, 7.0, 4.0], # (1, 2, 5, 6, 7)
[5.0, 12.0, 21.0, 27.0, 33.0, 24.0, 13.0],
[15.0, 33.0, 54.0, 63.0, 72.0, 51.0, 27.0],
[30.0, 63.0, 99.0, 108.0, 117.0, 81.0, 42.0],
[25.0, 52.0, 81.0, 87.0, 93.0, 64.0, 33.0],
[15.0, 31.0, 48.0, 51.0, 54.0, 37.0, 19.0],
],
[
[20.0, 42.0, 66.0, 72.0, 78.0, 54.0, 28.0],
[50.0, 104.0, 162.0, 174.0, 186.0, 128.0, 66.0],
[90.0, 186.0, 288.0, 306.0, 324.0, 222.0, 114.0],
[120.0, 246.0, 378.0, 396.0, 414.0, 282.0, 144.0],
[90.0, 184.0, 282.0, 294.0, 306.0, 208.0, 106.0],
[50.0, 102.0, 156.0, 162.0, 168.0, 114.0, 58.0],
],
[
[60.0, 123.0, 189.0, 198.0, 207.0, 141.0, 72.0],
[135.0, 276.0, 423.0, 441.0, 459.0, 312.0, 159.0],
[225.0, 459.0, 702.0, 729.0, 756.0, 513.0, 261.0],
[270.0, 549.0, 837.0, 864.0, 891.0, 603.0, 306.0],
[195.0, 396.0, 603.0, 621.0, 639.0, 432.0, 219.0],
[105.0, 213.0, 324.0, 333.0, 342.0, 231.0, 117.0],
],
[
[60.0, 122.0, 186.0, 192.0, 198.0, 134.0, 68.0],
[130.0, 264.0, 402.0, 414.0, 426.0, 288.0, 146.0],
[210.0, 426.0, 648.0, 666.0, 684.0, 462.0, 234.0],
[240.0, 486.0, 738.0, 756.0, 774.0, 522.0, 264.0],
[170.0, 344.0, 522.0, 534.0, 546.0, 368.0, 186.0],
[90.0, 182.0, 276.0, 282.0, 288.0, 194.0, 98.0],
],
[
[40.0, 81.0, 123.0, 126.0, 129.0, 87.0, 44.0],
[85.0, 172.0, 261.0, 267.0, 273.0, 184.0, 93.0],
[135.0, 273.0, 414.0, 423.0, 432.0, 291.0, 147.0],
[150.0, 303.0, 459.0, 468.0, 477.0, 321.0, 162.0],
[105.0, 212.0, 321.0, 327.0, 333.0, 224.0, 113.0],
[55.0, 111.0, 168.0, 171.0, 174.0, 117.0, 59.0],
],
],
[
[
[0.0, 1.0, 3.0, 6.0, 9.0, 7.0, 4.0],
[5.0, 12.0, 21.0, 27.0, 33.0, 24.0, 13.0],
[15.0, 33.0, 54.0, 63.0, 72.0, 51.0, 27.0],
[30.0, 63.0, 99.0, 108.0, 117.0, 81.0, 42.0],
[25.0, 52.0, 81.0, 87.0, 93.0, 64.0, 33.0],
[15.0, 31.0, 48.0, 51.0, 54.0, 37.0, 19.0],
],
[
[20.0, 42.0, 66.0, 72.0, 78.0, 54.0, 28.0],
[50.0, 104.0, 162.0, 174.0, 186.0, 128.0, 66.0],
[90.0, 186.0, 288.0, 306.0, 324.0, 222.0, 114.0],
[120.0, 246.0, 378.0, 396.0, 414.0, 282.0, 144.0],
[90.0, 184.0, 282.0, 294.0, 306.0, 208.0, 106.0],
[50.0, 102.0, 156.0, 162.0, 168.0, 114.0, 58.0],
],
[
[60.0, 123.0, 189.0, 198.0, 207.0, 141.0, 72.0],
[135.0, 276.0, 423.0, 441.0, 459.0, 312.0, 159.0],
[225.0, 459.0, 702.0, 729.0, 756.0, 513.0, 261.0],
[270.0, 549.0, 837.0, 864.0, 891.0, 603.0, 306.0],
[195.0, 396.0, 603.0, 621.0, 639.0, 432.0, 219.0],
[105.0, 213.0, 324.0, 333.0, 342.0, 231.0, 117.0],
],
[
[60.0, 122.0, 186.0, 192.0, 198.0, 134.0, 68.0],
[130.0, 264.0, 402.0, 414.0, 426.0, 288.0, 146.0],
[210.0, 426.0, 648.0, 666.0, 684.0, 462.0, 234.0],
[240.0, 486.0, 738.0, 756.0, 774.0, 522.0, 264.0],
[170.0, 344.0, 522.0, 534.0, 546.0, 368.0, 186.0],
[90.0, 182.0, 276.0, 282.0, 288.0, 194.0, 98.0],
],
[
[40.0, 81.0, 123.0, 126.0, 129.0, 87.0, 44.0],
[85.0, 172.0, 261.0, 267.0, 273.0, 184.0, 93.0],
[135.0, 273.0, 414.0, 423.0, 432.0, 291.0, 147.0],
[150.0, 303.0, 459.0, 468.0, 477.0, 321.0, 162.0],
[105.0, 212.0, 321.0, 327.0, 333.0, 224.0, 113.0],
[55.0, 111.0, 168.0, 171.0, 174.0, 117.0, 59.0],
],
],
]
]
).astype(np.float32)
op_expect(node, inputs=[x, W], outputs=[y], op_type=op_type, op_name=op_name)
op_name, op_type = "test_convtranspose_pads", "ConvTranspose"
x = np.array(
[[[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]]] # (1, 1, 3, 3)
).astype(np.float32)
    #NOCC:invalid-name(other: onnx example)
W = np.array(
[
[
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], # (1, 2, 3, 3)
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
]
]
).astype(np.float32)
node = onnx.helper.make_node(
"ConvTranspose",
["X", "W"],
["Y"],
strides=[3, 2],
pads=[1, 2, 1, 2],
name=op_name,
)
y = np.array(
[
[
[
[1.0, 1.0, 3.0], # (1, 2, 7, 3)
[1.0, 1.0, 3.0],
[7.0, 4.0, 9.0],
[7.0, 4.0, 9.0],
[7.0, 4.0, 9.0],
[13.0, 7.0, 15.0],
[13.0, 7.0, 15.0],
],
[
[1.0, 1.0, 3.0],
[1.0, 1.0, 3.0],
[7.0, 4.0, 9.0],
[7.0, 4.0, 9.0],
[7.0, 4.0, 9.0],
[13.0, 7.0, 15.0],
[13.0, 7.0, 15.0],
],
]
]
).astype(np.float32)
op_expect(node, inputs=[x, W], outputs=[y], op_type=op_type, op_name=op_name)
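    # For ConvTranspose (dilations=1, output_padding=0) each spatial dim follows
    #   out = (in - 1) * stride + kernel - pad_begin - pad_end,
    # e.g. the padded case above: H = (3 - 1) * 3 + 3 - 1 - 1 = 7 and
    # W = (3 - 1) * 2 + 3 - 2 - 2 = 3, giving the (1, 2, 7, 3) output.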
def test_cos():
op_name, op_type = "test_cos_example", "Cos"
node = onnx.helper.make_node("Cos", inputs=["x"], outputs=["y"], name=op_name)
x = np.array([-1, 0, 1]).astype(np.float32)
y = np.cos(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name, op_type = "test_cos", "Cos"
node = onnx.helper.make_node("Cos", inputs=["x"], outputs=["y"], name=op_name)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.cos(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_cosh():
op_name, op_type = "test_cosh_example", "Cosh"
node = onnx.helper.make_node("Cosh", inputs=["x"], outputs=["y"], name=op_name)
x = np.array([-1, 0, 1]).astype(np.float32)
y = np.cosh(x) # expected output [1.54308069, 1., 1.54308069]
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name, op_type = "test_cosh", "Cosh"
node = onnx.helper.make_node("Cosh", inputs=["x"], outputs=["y"], name=op_name)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.cosh(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_depthtospace():
op_name, op_type = "test_depthtospace_crd_mode_example", "DepthToSpace"
node = onnx.helper.make_node(
"DepthToSpace",
inputs=["x"],
outputs=["y"],
blocksize=2,
mode="CRD",
name=op_name,
)
# (1, 8, 2, 3) input tensor
x = np.array(
[
[
[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]],
[[9.0, 10.0, 11.0], [12.0, 13.0, 14.0]],
[[18.0, 19.0, 20.0], [21.0, 22.0, 23.0]],
[[27.0, 28.0, 29.0], [30.0, 31.0, 32.0]],
[[36.0, 37.0, 38.0], [39.0, 40.0, 41.0]],
[[45.0, 46.0, 47.0], [48.0, 49.0, 50.0]],
[[54.0, 55.0, 56.0], [57.0, 58.0, 59.0]],
[[63.0, 64.0, 65.0], [66.0, 67.0, 68.0]],
]
]
).astype(np.float32)
# (1, 2, 4, 6) output tensor
y = np.array(
[
[
[
[0.0, 9.0, 1.0, 10.0, 2.0, 11.0],
[18.0, 27.0, 19.0, 28.0, 20.0, 29.0],
[3.0, 12.0, 4.0, 13.0, 5.0, 14.0],
[21.0, 30.0, 22.0, 31.0, 23.0, 32.0],
],
[
[36.0, 45.0, 37.0, 46.0, 38.0, 47.0],
[54.0, 63.0, 55.0, 64.0, 56.0, 65.0],
[39.0, 48.0, 40.0, 49.0, 41.0, 50.0],
[57.0, 66.0, 58.0, 67.0, 59.0, 68.0],
],
]
]
).astype(np.float32)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "test_depthtospace_example"
node = onnx.helper.make_node(
"DepthToSpace",
inputs=["x"],
outputs=["y"],
blocksize=2,
mode="DCR",
name=op_name,
)
# (1, 8, 2, 3) input tensor
x = np.array(
[
[
[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]],
[[9.0, 10.0, 11.0], [12.0, 13.0, 14.0]],
[[18.0, 19.0, 20.0], [21.0, 22.0, 23.0]],
[[27.0, 28.0, 29.0], [30.0, 31.0, 32.0]],
[[36.0, 37.0, 38.0], [39.0, 40.0, 41.0]],
[[45.0, 46.0, 47.0], [48.0, 49.0, 50.0]],
[[54.0, 55.0, 56.0], [57.0, 58.0, 59.0]],
[[63.0, 64.0, 65.0], [66.0, 67.0, 68.0]],
]
]
).astype(np.float32)
# (1, 2, 4, 6) output tensor
y = np.array(
[
[
[
[0.0, 18.0, 1.0, 19.0, 2.0, 20.0],
[36.0, 54.0, 37.0, 55.0, 38.0, 56.0],
[3.0, 21.0, 4.0, 22.0, 5.0, 23.0],
[39.0, 57.0, 40.0, 58.0, 41.0, 59.0],
],
[
[9.0, 27.0, 10.0, 28.0, 11.0, 29.0],
[45.0, 63.0, 46.0, 64.0, 47.0, 65.0],
[12.0, 30.0, 13.0, 31.0, 14.0, 32.0],
[48.0, 66.0, 49.0, 67.0, 50.0, 68.0],
],
]
]
).astype(np.float32)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
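    # A minimal numpy sketch of the two DepthToSpace modes exercised above
    # (assumption: NCHW layout with blocksize b; `_depth_to_space_ref` is a local
    # helper added here for illustration only, not part of the ONNX test suite).
    def _depth_to_space_ref(x, b, mode="DCR"):
        n, c, h, w = x.shape
        if mode == "DCR":
            # depth-column-row: block elements are interleaved across the depth axis
            tmp = x.reshape(n, b, b, c // (b * b), h, w).transpose(0, 3, 4, 1, 5, 2)
        else:  # "CRD"
            # column-row-depth: block elements stay contiguous per output channel
            tmp = x.reshape(n, c // (b * b), b, b, h, w).transpose(0, 1, 4, 2, 5, 3)
        return tmp.reshape(n, c // (b * b), h * b, w * b)
    # e.g. _depth_to_space_ref(x, 2, "DCR") reproduces the DCR expected tensor above.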
def test_div():
op_name, op_type = "test_div_example", "Div"
node = onnx.helper.make_node("Div", inputs=["x", "y"], outputs=["z"], name=op_name)
x = np.array([3, 4]).astype(np.float32)
y = np.array([1, 2]).astype(np.float32)
z = x / y # expected output [3., 2.]
op_expect(node, inputs=[x, y], outputs=[z], op_type=op_type, op_name=op_name)
op_name, op_type = "test_div", "Div"
node = onnx.helper.make_node("Div", inputs=["x", "y"], outputs=["z"], name=op_name)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.random.rand(3, 4, 5).astype(np.float32) + 1.0
z = x / y
op_expect(node, inputs=[x, y], outputs=[z], op_type=op_type, op_name=op_name)
op_name, op_type = "test_div_bcast", "Div"
node = onnx.helper.make_node("Div", inputs=["x", "y"], outputs=["z"], name=op_name)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.random.rand(5).astype(np.float32) + 1.0
z = x / y
op_expect(node, inputs=[x, y], outputs=[z], op_type=op_type, op_name=op_name)
def test_einsum():
op_name, op_type = "test_einsum_batch_diagonal", "Einsum"
eqn = "...ii ->...i"
node = onnx.helper.make_node(
"Einsum", inputs=["x"], outputs=["y"], equation=eqn, name=op_name
)
    #NOCC:invalid-name(other: onnx example)
X = np.random.randn(3, 5, 5).astype(np.float32)
from onnx.backend.test.case.node.einsum import einsum_reference_implementation
    #NOCC:invalid-name(other: onnx example)
Z = einsum_reference_implementation(eqn, (X,))
op_expect(node, inputs=[X], outputs=[Z], op_type=op_type, op_name=op_name)
def test_elu():
op_name, op_type = "test_elu_example", "Elu"
node = onnx.helper.make_node(
"Elu", inputs=["x"], outputs=["y"], alpha=2.0, name=op_name
)
x = np.array([-1, 0, 1]).astype(np.float32)
# expected output [-1.2642411, 0., 1.]
y = np.clip(x, 0, np.inf) + (np.exp(np.clip(x, -np.inf, 0)) - 1) * 2.0
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name, op_type = "test_elu", "Elu"
node = onnx.helper.make_node(
"Elu", inputs=["x"], outputs=["y"], alpha=2.0, name=op_name
)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.clip(x, 0, np.inf) + (np.exp(np.clip(x, -np.inf, 0)) - 1) * 2.0
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name, op_type = "test_elu_default", "Elu"
default_alpha = 1.0
node = onnx.helper.make_node("Elu", inputs=["x"], outputs=["y"], name=op_name)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.clip(x, 0, np.inf) + (np.exp(np.clip(x, -np.inf, 0)) - 1) * default_alpha
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_erf():
op_name, op_type = "test_erf", "Erf"
node = onnx.helper.make_node("Erf", inputs=["x"], outputs=["y"], name=op_name)
x = np.random.randn(1, 3, 32, 32).astype(np.float32)
import math
y = np.vectorize(math.erf)(x).astype(np.float32)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_exp():
op_name, op_type = "test_exp_example", "Exp"
node = onnx.helper.make_node("Exp", inputs=["x"], outputs=["y"], name=op_name)
x = np.array([-1, 0, 1]).astype(np.float32)
y = np.exp(x) # expected output [0.36787945, 1., 2.71828175]
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name, op_type = "test_exp", "Exp"
node = onnx.helper.make_node("Exp", inputs=["x"], outputs=["y"], name=op_name)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.exp(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_eyelike():
op_name, op_type = "test_eyelike_populate_off_main_diagonal", "EyeLike"
shape = (4, 5)
off_diagonal_offset = 1
node = onnx.helper.make_node(
"EyeLike",
inputs=["x"],
outputs=["y"],
k=off_diagonal_offset,
dtype=onnx.TensorProto.FLOAT,
name=op_name,
)
x = np.random.randint(0, 100, size=shape, dtype=np.int32)
y = np.eye(shape[0], shape[1], k=off_diagonal_offset, dtype=np.float32)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "test_eyelike_with_dtype"
shape = (3, 4)
node = onnx.helper.make_node(
"EyeLike",
inputs=["x"],
outputs=["y"],
dtype=onnx.TensorProto.FLOAT,
name=op_name,
)
x = np.random.randint(0, 100, size=shape, dtype=np.int32)
y = np.eye(shape[0], shape[1], dtype=np.float32)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "test_eyelike_without_dtype"
shape = (4, 4)
node = onnx.helper.make_node("EyeLike", inputs=["x"], outputs=["y"], name=op_name)
x = np.random.randint(0, 100, size=shape, dtype=np.int32)
y = np.eye(shape[0], shape[1], dtype=np.int32)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_floor():
op_name, op_type = "test_floor_example", "Floor"
node = onnx.helper.make_node("Floor", inputs=["x"], outputs=["y"], name=op_name)
x = np.array([-1.5, 1.2, 2]).astype(np.float32)
y = np.floor(x) # expected output [-2., 1., 2.]
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name, op_type = "test_floor", "Floor"
node = onnx.helper.make_node("Floor", inputs=["x"], outputs=["y"], name=op_name)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.floor(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def verify_rnn(
seq_length,
batch_size,
input_size,
hidden_size,
rnn_type="LSTM",
use_bias=False,
activations=None,
alphas=None,
betas=None,
use_initial_state=False,
use_peep=False,
linear_before_reset=False,
op_name=None,
):
if rnn_type == "LSTM":
multiplier = 4
elif rnn_type == "GRU":
multiplier = 3
else:
raise NotImplementedError("%s RNNs not yet supported." % rnn_type)
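    # W and R stack one weight block per gate, hence the multiplier:
    # 4 gates (i, o, f, c) for LSTM and 3 gates (z, r, h) for GRU, per the ONNX layout.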
x_np = np.random.uniform(size=(seq_length, batch_size, input_size)).astype(
"float32"
)
w_np = np.random.uniform(size=(1, multiplier * hidden_size, input_size)).astype(
"float32"
)
r_np = np.random.uniform(size=(1, multiplier * hidden_size, hidden_size)).astype(
"float32"
)
input_names = ["X", "W", "R"]
input_tensors = [
helper.make_tensor_value_info("X", TensorProto.FLOAT, list(x_np.shape)),
helper.make_tensor_value_info("W", TensorProto.FLOAT, list(w_np.shape)),
helper.make_tensor_value_info("R", TensorProto.FLOAT, list(r_np.shape)),
]
input_values = [x_np, w_np, r_np]
if use_bias:
b_np = np.random.uniform(size=(1, multiplier * 2 * hidden_size)).astype(
"float32"
)
input_names.append("B")
input_tensors.append(
helper.make_tensor_value_info(
"B", TensorProto.FLOAT, [1, multiplier * 2 * hidden_size]
)
)
input_values.append(b_np)
if use_initial_state:
assert use_bias is True, "Initial states must have bias specified."
sequence_np = np.repeat(seq_length, batch_size).astype("int32")
input_names.append("sequence_lens")
input_tensors.append(
helper.make_tensor_value_info(
"sequence_lens", TensorProto.INT32, [batch_size]
)
)
input_values.append(sequence_np)
initial_h_np = np.random.uniform(size=(1, batch_size, hidden_size)).astype(
"float32"
)
input_names.append("initial_h")
input_tensors.append(
helper.make_tensor_value_info(
"initial_h", TensorProto.FLOAT, [1, batch_size, hidden_size]
)
)
input_values.append(initial_h_np)
if rnn_type == "LSTM":
initial_c_np = np.random.uniform(size=(1, batch_size, hidden_size)).astype(
"float32"
)
input_names.append("initial_c")
input_tensors.append(
helper.make_tensor_value_info(
"initial_c", TensorProto.FLOAT, [1, batch_size, hidden_size]
)
)
input_values.append(initial_c_np)
if use_peep and rnn_type == "LSTM":
assert (
use_initial_state is True
), "Peepholes require initial state to be specified."
p_np = np.random.uniform(size=(1, 3 * hidden_size)).astype("float32")
input_names.append("P")
input_tensors.append(
helper.make_tensor_value_info("P", TensorProto.FLOAT, [1, 3 * hidden_size])
)
input_values.append(p_np)
    #NOCC:invalid-name(other: onnx example)
Y_shape = [seq_length, 1, batch_size, hidden_size]
    #NOCC:invalid-name(other: onnx example)
Y_h_shape = [1, batch_size, hidden_size]
outputs = ["Y", "Y_h"]
graph_outputs = [
helper.make_tensor_value_info("Y", TensorProto.FLOAT, list(Y_shape)),
helper.make_tensor_value_info("Y_h", TensorProto.FLOAT, list(Y_h_shape)),
]
output_shapes = [Y_shape, Y_h_shape]
if rnn_type == "LSTM":
    #NOCC:invalid-name(other: onnx example)
Y_c_shape = [1, batch_size, hidden_size]
outputs.append("Y_c")
graph_outputs.append(
helper.make_tensor_value_info("Y_c", TensorProto.FLOAT, list(Y_c_shape))
)
output_shapes.append(Y_c_shape)
rnn_node = helper.make_node(
rnn_type,
inputs=input_names,
outputs=outputs,
hidden_size=hidden_size,
name=op_name,
)
if activations is not None:
activations_attr = helper.make_attribute("activations", activations)
rnn_node.attribute.append(activations_attr)
if alphas is not None:
alphas_attr = helper.make_attribute("activation_alpha", alphas)
rnn_node.attribute.append(alphas_attr)
if betas is not None:
betas_attr = helper.make_attribute("activation_beta", betas)
rnn_node.attribute.append(betas_attr)
if linear_before_reset and rnn_type == "GRU":
lbr_attr = helper.make_attribute("linear_before_reset", 1)
rnn_node.attribute.append(lbr_attr)
graph = helper.make_graph(
[rnn_node], "rnn_test", inputs=input_tensors, outputs=graph_outputs
)
model = helper.make_model(graph, producer_name="rnn_test")
verify_with_ort_with_trt(model, input_values, op_name)
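# Example invocation of verify_rnn (not one of the original test cases; shown only
# to illustrate the helper's parameters, the values are arbitrary):
#   verify_rnn(seq_length=2, batch_size=1, input_size=16, hidden_size=32,
#              rnn_type="LSTM", use_bias=True, op_name="lstm_with_bias_example")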
def test_gather():
op_name, op_type = "test_gather_0", "Gather"
node = onnx.helper.make_node(
"Gather", inputs=["data", "indices"], outputs=["y"], axis=0, name=op_name
)
data = np.random.randn(5, 4, 3, 2).astype(np.float32)
indices = np.array([0, 1, 3])
y = np.take(data, indices, axis=0)
op_expect(
node,
inputs=[data, indices.astype(np.int64)],
outputs=[y],
op_type=op_type,
op_name=op_name,
)
op_name = "test_gather_1"
node = onnx.helper.make_node(
"Gather", inputs=["data", "indices"], outputs=["y"], axis=1, name=op_name
)
data = np.random.randn(5, 4, 3, 2).astype(np.float32)
indices = np.array([0, 1, 3])
y = np.take(data, indices, axis=1)
op_expect(
node,
inputs=[data, indices.astype(np.int64)],
outputs=[y],
op_type=op_type,
op_name=op_name,
)
op_name = "test_gather_2d_indices"
node = onnx.helper.make_node(
"Gather", inputs=["data", "indices"], outputs=["y"], axis=1, name=op_name
)
data = np.random.randn(3, 3).astype(np.float32)
indices = np.array([[0, 2]])
y = np.take(data, indices, axis=1)
op_expect(
node,
inputs=[data, indices.astype(np.int64)],
outputs=[y],
op_type=op_type,
op_name=op_name,
)
op_name = "test_gather_negative_indices"
node = onnx.helper.make_node(
"Gather", inputs=["data", "indices"], outputs=["y"], axis=0, name=op_name
)
data = np.arange(10).astype(np.float32)
indices = np.array([0, -9, -10])
y = np.take(data, indices, axis=0)
# print(y)
# [0. 1. 0.]
op_expect(
node,
inputs=[data, indices.astype(np.int64)],
outputs=[y],
op_type=op_type,
op_name=op_name,
)
def test_gatherelement():
op_name, op_type = "test_gather_elements_0", "GatherElements"
axis = 1
node = onnx.helper.make_node(
"GatherElements",
inputs=["data", "indices"],
outputs=["y"],
axis=axis,
name=op_name,
)
data = np.array([[1, 2], [3, 4]], dtype=np.float32)
indices = np.array([[0, 0], [1, 0]], dtype=np.int32)
from onnx.backend.test.case.node.gatherelements import gather_elements
y = gather_elements(data, indices, axis)
# print(y) produces
# [[1, 1],
# [4, 3]]
op_expect(
node,
inputs=[data, indices.astype(np.int64)],
outputs=[y],
op_type=op_type,
op_name=op_name,
)
op_name = "test_gather_elements_1"
axis = 0
node = onnx.helper.make_node(
"GatherElements",
inputs=["data", "indices"],
outputs=["y"],
axis=axis,
name=op_name,
)
data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
indices = np.array([[1, 2, 0], [2, 0, 0]], dtype=np.int32)
y = gather_elements(data, indices, axis)
# print(y) produces
# [[4, 8, 3],
# [7, 2, 3]]
op_expect(
node,
inputs=[data, indices.astype(np.int64)],
outputs=[y],
op_type=op_type,
op_name=op_name,
)
op_name = "test_gather_elements_negative_indices"
axis = 0
node = onnx.helper.make_node(
"GatherElements",
inputs=["data", "indices"],
outputs=["y"],
axis=axis,
name=op_name,
)
data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
indices = np.array([[-1, -2, 0], [-2, 0, 0]], dtype=np.int32)
y = gather_elements(data, indices, axis)
# print(y) produces
# [[7, 5, 3],
# [4, 2, 3]]
op_expect(
node,
inputs=[data, indices.astype(np.int64)],
outputs=[y],
op_type=op_type,
op_name=op_name,
)
def test_gathernd():
op_name, op_type = "test_gathernd_example_float32", "GatherND"
node = onnx.helper.make_node(
"GatherND", inputs=["data", "indices"], outputs=["output"], name=op_name
)
data = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]], dtype=np.float32)
indices = np.array([[[0, 1]], [[1, 0]]], dtype=np.int64)
from onnx.backend.test.case.node.gathernd import gather_nd_impl
output = gather_nd_impl(data, indices, 0)
expected_output = np.array([[[2, 3]], [[4, 5]]], dtype=np.float32)
assert np.array_equal(output, expected_output)
op_expect(
node, inputs=[data, indices], outputs=[output], op_type=op_type, op_name=op_name
)
op_name = "test_gathernd_example_int32"
node = onnx.helper.make_node(
"GatherND", inputs=["data", "indices"], outputs=["output"], name=op_name
)
data = np.array([[0, 1], [2, 3]], dtype=np.int32)
indices = np.array([[0, 0], [1, 1]], dtype=np.int64)
output = gather_nd_impl(data, indices, 0)
expected_output = np.array([0, 3], dtype=np.int32)
assert np.array_equal(output, expected_output)
op_expect(
node, inputs=[data, indices], outputs=[output], op_type=op_type, op_name=op_name
)
op_name = "test_gathernd_example_int32_batch_dim1"
node = onnx.helper.make_node(
"GatherND",
inputs=["data", "indices"],
outputs=["output"],
batch_dims=1,
name=op_name,
)
data = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]], dtype=np.int32)
indices = np.array([[1], [0]], dtype=np.int64)
output = gather_nd_impl(data, indices, 1)
expected_output = np.array([[2, 3], [4, 5]], dtype=np.int32)
assert np.array_equal(output, expected_output)
op_expect(
node, inputs=[data, indices], outputs=[output], op_type=op_type, op_name=op_name
)
def test_gemm():
op_name, op_type = "test_gemm_all_attributes", "Gemm"
node = onnx.helper.make_node(
"Gemm",
inputs=["a", "b", "c"],
outputs=["y"],
alpha=0.25,
beta=0.35,
transA=1,
transB=1,
name=op_name,
)
a = np.random.ranf([4, 3]).astype(np.float32)
b = np.random.ranf([5, 4]).astype(np.float32)
c = np.random.ranf([1, 5]).astype(np.float32)
from onnx.backend.test.case.node.gemm import gemm_reference_implementation
y = gemm_reference_implementation(
a, b, c, transA=1, transB=1, alpha=0.25, beta=0.35
)
op_expect(node, inputs=[a, b, c], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "test_gemm_alpha"
node = onnx.helper.make_node(
"Gemm", inputs=["a", "b", "c"], outputs=["y"], alpha=0.5, name=op_name
)
a = np.random.ranf([3, 5]).astype(np.float32)
b = np.random.ranf([5, 4]).astype(np.float32)
c = np.zeros([1, 4]).astype(np.float32)
y = gemm_reference_implementation(a, b, c, alpha=0.5)
op_expect(node, inputs=[a, b, c], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "test_gemm_beta"
node = onnx.helper.make_node(
"Gemm", inputs=["a", "b", "c"], outputs=["y"], beta=0.5, name=op_name
)
a = np.random.ranf([2, 7]).astype(np.float32)
b = np.random.ranf([7, 4]).astype(np.float32)
c = np.random.ranf([1, 4]).astype(np.float32)
y = gemm_reference_implementation(a, b, c, beta=0.5)
op_expect(node, inputs=[a, b, c], outputs=[y], op_type=op_type, op_name=op_name)
def test_globalaveragepool():
op_name, op_type = "test_globalaveragepool", "GlobalAveragePool"
node = onnx.helper.make_node(
"GlobalAveragePool", inputs=["x"], outputs=["y"], name=op_name
)
x = np.random.randn(1, 3, 5, 5).astype(np.float32)
y = np.mean(x, axis=tuple(range(2, np.ndim(x))), keepdims=True)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "test_globalaveragepool_precomputed"
node = onnx.helper.make_node(
"GlobalAveragePool", inputs=["x"], outputs=["y"], name=op_name
)
x = np.array(
[
[
[
[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
]
]
]
).astype(np.float32)
y = np.array([[[[5]]]]).astype(np.float32)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_globalmaxpool():
op_name = "test_globalmaxpool"
op_type = "GlobalMaxPool"
node = onnx.helper.make_node(
"GlobalMaxPool", inputs=["x"], outputs=["y"], name=op_name
)
x = np.random.randn(1, 3, 5, 5).astype(np.float32)
y = np.max(x, axis=tuple(range(2, np.ndim(x))), keepdims=True)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "test_globalmaxpool_precomputed"
node = onnx.helper.make_node(
"GlobalMaxPool", inputs=["x"], outputs=["y"], name=op_name
)
x = np.array(
[
[
[
[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
]
]
]
).astype(np.float32)
y = np.array([[[[9]]]]).astype(np.float32)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_hardsigmoid():
op_name, op_type = "test_hardsigmoid_example", "HardSigmoid"
node = onnx.helper.make_node(
"HardSigmoid", inputs=["x"], outputs=["y"], alpha=0.5, beta=0.6, name=op_name
)
x = np.array([-1, 0, 1]).astype(np.float32)
y = np.clip(x * 0.5 + 0.6, 0, 1) # expected output [0.1, 0.6, 1.]
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "test_hardsigmoid"
node = onnx.helper.make_node(
"HardSigmoid", inputs=["x"], outputs=["y"], alpha=0.5, beta=0.6, name=op_name
)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.clip(x * 0.5 + 0.6, 0, 1)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "test_hardsigmoid_default"
default_alpha = 0.2
default_beta = 0.5
node = onnx.helper.make_node(
"HardSigmoid", inputs=["x"], outputs=["y"], name=op_name
)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.clip(x * default_alpha + default_beta, 0, 1)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_hardswish():
op_name, op_type = "test_hardswish", "HardSwish"
node = onnx.helper.make_node("HardSwish", inputs=["x"], outputs=["y"], name=op_name)
x = np.random.randn(3, 4, 5).astype(np.float32)
from onnx.backend.test.case.node.hardswish import hardswish
y = hardswish(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_hardmax():
op_name, op_type = "test_hardmax_example", "Hardmax"
node = onnx.helper.make_node("Hardmax", inputs=["x"], outputs=["y"], name=op_name)
x = np.array([[3, 0, 1, 2], [2, 5, 1, 0], [0, 1, 3, 2], [0, 1, 2, 3]]).astype(
np.float32
)
# expect result:
# [[1. 0. 0. 0.]
# [0. 1. 0. 0.]
# [0. 0. 1. 0.]
# [0. 0. 0. 1.]]
from onnx.backend.test.case.node.hardmax import hardmax
y = hardmax(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_identity():
op_name, op_type = "test_identity", "Identity"
node = onnx.helper.make_node("Identity", inputs=["x"], outputs=["y"], name=op_name)
data = np.array(
[
[
[
[1, 2],
[3, 4],
]
]
],
dtype=np.float32,
)
op_expect(node, inputs=[data], outputs=[data], op_type=op_type, op_name=op_name)
def test_instancenormalization():
op_name, op_type = "test_instancenorm_example", "InstanceNormalization"
def _instancenorm_test_mode(x, s, bias, epsilon=1e-5): # type: ignore
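        # Reference formula: y = s * (x - mean) / sqrt(var + epsilon) + bias, with
        # mean and var computed per sample and per channel over the spatial axes.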
dims_x = len(x.shape)
axis = tuple(range(2, dims_x))
mean = np.mean(x, axis=axis, keepdims=True)
var = np.var(x, axis=axis, keepdims=True)
dim_ones = (1,) * (dims_x - 2)
s = s.reshape(-1, *dim_ones)
bias = bias.reshape(-1, *dim_ones)
return s * (x - mean) / np.sqrt(var + epsilon) + bias
# input size: (1, 2, 1, 3)
x = np.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(np.float32)
s = np.array([1.0, 1.5]).astype(np.float32)
bias = np.array([0, 1]).astype(np.float32)
y = _instancenorm_test_mode(x, s, bias).astype(np.float32)
node = onnx.helper.make_node(
"InstanceNormalization", inputs=["x", "s", "bias"], outputs=["y"], name=op_name
)
# output size: (1, 2, 1, 3)
op_expect(node, inputs=[x, s, bias], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "test_instancenorm_epsilon"
# input size: (2, 3, 4, 5)
x = np.random.randn(2, 3, 4, 5).astype(np.float32)
s = np.random.randn(3).astype(np.float32)
bias = np.random.randn(3).astype(np.float32)
epsilon = 1e-2
y = _instancenorm_test_mode(x, s, bias, epsilon).astype(np.float32)
node = onnx.helper.make_node(
"InstanceNormalization",
inputs=["x", "s", "bias"],
outputs=["y"],
epsilon=epsilon,
name=op_name,
)
# output size: (2, 3, 4, 5)
op_expect(node, inputs=[x, s, bias], outputs=[y], op_type=op_type, op_name=op_name)
def test_leakyrelu():
op_name, op_type = "test_leakyrelu_example", "LeakyRelu"
node = onnx.helper.make_node(
"LeakyRelu", inputs=["x"], outputs=["y"], alpha=0.1, name=op_name
)
x = np.array([-1, 0, 1]).astype(np.float32)
# expected output [-0.1, 0., 1.]
y = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * 0.1
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "test_leakyrelu"
node = onnx.helper.make_node(
"LeakyRelu", inputs=["x"], outputs=["y"], alpha=0.1, name=op_name
)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * 0.1
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "test_leakyrelu_default"
default_alpha = 0.01
node = onnx.helper.make_node("LeakyRelu", inputs=["x"], outputs=["y"], name=op_name)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * default_alpha
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_log():
op_name = "test_log_example"
op_type = "Log"
node = onnx.helper.make_node("Log", inputs=["x"], outputs=["y"], name=op_name)
x = np.array([1, 10]).astype(np.float32)
y = np.log(x) # expected output [0., 2.30258512]
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "test_log"
node = onnx.helper.make_node("Log", inputs=["x"], outputs=["y"], name=op_name)
x = np.exp(np.random.randn(3, 4, 5).astype(np.float32))
y = np.log(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_logsoftmax():
op_name, op_type = "test_logsoftmax_example_1", "LogSoftmax"
node = onnx.helper.make_node(
"LogSoftmax", inputs=["x"], outputs=["y"], name=op_name
)
x = np.array([[-1, 0, 1]]).astype(np.float32)
# expected output
# [[-2.4076061 -1.407606 -0.407606 ]]
from onnx.backend.test.case.node.logsoftmax import logsoftmax
y = logsoftmax(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
x = np.array([[0, 1, 2, 3], [10000, 10001, 10002, 10003]]).astype(np.float32)
axis_order = [0, 1, -1]
for axis in axis_order:
op_name = "test_logsoftmax_axis_{}".format(str(axis + 1))
node = onnx.helper.make_node(
"LogSoftmax", inputs=["x"], outputs=["y"], axis=axis, name=op_name
)
y = logsoftmax(x, axis=axis)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_matmul():
op_name, op_type = "test_matmul_2d", "MatMul"
node = onnx.helper.make_node(
"MatMul", inputs=["a", "b"], outputs=["c"], name=op_name
)
# 2d
a = np.random.randn(3, 4).astype(np.float32)
b = np.random.randn(4, 3).astype(np.float32)
c = np.matmul(a, b)
op_expect(node, inputs=[a, b], outputs=[c], op_type=op_type, op_name=op_name)
def test_max():
op_name = "test_max_example"
op_type = "Max"
data_0 = np.array([3, 2, 1]).astype(np.float32)
data_1 = np.array([1, 4, 4]).astype(np.float32)
data_2 = np.array([2, 5, 3]).astype(np.float32)
result = np.array([3, 5, 4]).astype(np.float32)
node = onnx.helper.make_node(
"Max", inputs=["data_0", "data_1", "data_2"], outputs=["result"], name=op_name
)
op_expect(
node,
inputs=[data_0, data_1, data_2],
outputs=[result],
op_type=op_type,
op_name=op_name,
)
op_name = "test_max_two_inputs"
result = np.maximum(data_0, data_1)
node = onnx.helper.make_node(
"Max", inputs=["data_0", "data_1"], outputs=["result"], name=op_name
)
op_expect(
node,
inputs=[data_0, data_1],
outputs=[result],
op_type=op_type,
op_name=op_name,
)
def _test_maxpool_2d_ceil():
op_name, op_type = "test_maxpool_2d_ceil", "MaxPool"
node = onnx.helper.make_node(
"MaxPool",
inputs=["x"],
outputs=["y"],
kernel_shape=[3, 3],
strides=[2, 2],
ceil_mode=True,
name=op_name,
)
x = np.array(
[
[
[
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16],
]
]
]
).astype(np.float32)
y = np.array([[[[11, 12], [15, 16]]]]).astype(np.float32)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def _test_maxpool_1d_default():
op_name, op_type = "test_maxpool_1d_default", "MaxPool"
node = onnx.helper.make_node(
"MaxPool", inputs=["x"], outputs=["y"], kernel_shape=[2], name=op_name
)
x = np.random.randn(1, 3, 32).astype(np.float32)
x_shape = np.shape(x)
kernel_shape = [2]
strides = [1]
from onnx.backend.test.case.node.pool_op_common import get_output_shape, pool
out_shape = get_output_shape("VALID", x_shape[2:], kernel_shape, strides)
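    # "VALID" (no padding): out = floor((in - kernel) / stride) + 1, i.e. 31 here.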
padded = x
y = pool(padded, x_shape, kernel_shape, strides, out_shape, [0], "MAX")
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_maxpool():
_test_maxpool_2d_ceil()
_test_maxpool_1d_default()
def test_mean():
op_name, op_type = "test_mean_example", "Mean"
data_0 = np.array([3, 0, 2]).astype(np.float32)
data_1 = np.array([1, 3, 4]).astype(np.float32)
data_2 = np.array([2, 6, 6]).astype(np.float32)
result = np.array([2, 3, 4]).astype(np.float32)
node = onnx.helper.make_node(
"Mean", inputs=["data_0", "data_1", "data_2"], outputs=["result"], name=op_name
)
op_expect(
node,
inputs=[data_0, data_1, data_2],
outputs=[result],
op_type=op_type,
op_name=op_name,
)
op_name = "test_mean_two_inputs"
result = np.divide(np.add(data_0, data_1), 2.0)
node = onnx.helper.make_node(
"Mean", inputs=["data_0", "data_1"], outputs=["result"], name=op_name
)
op_expect(
node,
inputs=[data_0, data_1],
outputs=[result],
op_type=op_type,
op_name=op_name,
)
def test_min():
op_name, op_type = "test_min_example", "Min"
data_0 = np.array([3, 2, 1]).astype(np.float32)
data_1 = np.array([1, 4, 4]).astype(np.float32)
data_2 = np.array([2, 5, 0]).astype(np.float32)
result = np.array([1, 2, 0]).astype(np.float32)
node = onnx.helper.make_node(
"Min", inputs=["data_0", "data_1", "data_2"], outputs=["result"], name=op_name
)
op_expect(
node,
inputs=[data_0, data_1, data_2],
outputs=[result],
op_type=op_type,
op_name=op_name,
)
op_name = "test_min_two_inputs"
result = np.minimum(data_0, data_1)
node = onnx.helper.make_node(
"Min", inputs=["data_0", "data_1"], outputs=["result"], name=op_name
)
op_expect(
node,
inputs=[data_0, data_1],
outputs=[result],
op_type=op_type,
op_name=op_name,
)
def test_mul():
op_name, op_type = "test_mul_example", "Mul"
node = onnx.helper.make_node("Mul", inputs=["x", "y"], outputs=["z"], name=op_name)
x = np.array([1, 2, 3]).astype(np.float32)
y = np.array([4, 5, 6]).astype(np.float32)
z = x * y # expected output [4., 10., 18.]
op_expect(node, inputs=[x, y], outputs=[z], op_type=op_type, op_name=op_name)
op_name = "test_mul"
node = onnx.helper.make_node("Mul", inputs=["x", "y"], outputs=["z"], name=op_name)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.random.randn(3, 4, 5).astype(np.float32)
z = x * y
op_expect(node, inputs=[x, y], outputs=[z], op_type=op_type, op_name=op_name)
op_name = "test_mul_bcast"
node = onnx.helper.make_node("Mul", inputs=["x", "y"], outputs=["z"], name=op_name)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.random.randn(5).astype(np.float32)
z = x * y
op_expect(node, inputs=[x, y], outputs=[z], op_type=op_type, op_name=op_name)
def test_neg():
op_name, op_type = "test_neg_example", "Neg"
node = onnx.helper.make_node("Neg", inputs=["x"], outputs=["y"], name=op_name)
x = np.array([-4, 2]).astype(np.float32)
y = np.negative(x) # expected output [4., -2.],
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "test_neg"
node = onnx.helper.make_node("Neg", inputs=["x"], outputs=["y"], name=op_name)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.negative(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_negativeloglikelihoodloss():
op_name, op_type = "test_nllloss_NC", "NegativeLogLikelihoodLoss"
reduction = "none"
node = onnx.helper.make_node(
"NegativeLogLikelihoodLoss",
inputs=["input", "target"],
outputs=["loss"],
reduction=reduction,
name=op_name,
)
    #NOCC:invalid-name(other: onnx example)
N, C = 3, 5
np.random.seed(0)
input = np.random.rand(N, C).astype(np.float32)
target = np.random.randint(0, high=C, size=(N,)).astype(np.int64)
from onnx.backend.test.case.node.negativeloglikelihoodloss import (
compute_negative_log_likelihood_loss,
)
negative_log_likelihood_loss = compute_negative_log_likelihood_loss(
input, target, weight=None, reduction=reduction
)
op_expect(
node,
inputs=[input, target],
outputs=[negative_log_likelihood_loss],
op_type=op_type,
op_name=op_name,
)
def test_prelu():
op_name, op_type = "test_prelu_example", "PRelu"
node = onnx.helper.make_node(
"PRelu", inputs=["x", "slope"], outputs=["y"], name=op_name
)
x = np.random.randn(3, 4, 5).astype(np.float32)
slope = np.random.randn(3, 4, 5).astype(np.float32)
y = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * slope
op_expect(node, inputs=[x, slope], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "test_prelu_broadcast"
node = onnx.helper.make_node(
"PRelu", inputs=["x", "slope"], outputs=["y"], name=op_name
)
x = np.random.randn(3, 4, 5).astype(np.float32)
slope = np.random.randn(5).astype(np.float32)
y = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * slope
op_expect(node, inputs=[x, slope], outputs=[y], op_type=op_type, op_name=op_name)
def test_pow():
op_name, op_type = "test_pow_example", "Pow"
node = onnx.helper.make_node("Pow", inputs=["x", "y"], outputs=["z"], name=op_name)
x = np.array([1, 2, 3]).astype(np.float32)
y = np.array([4, 5, 6]).astype(np.float32)
z = pow(x, y) # expected output [1., 32., 729.]
op_expect(node, inputs=[x, y], outputs=[z], op_type=op_type, op_name=op_name)
op_name = "test_pow"
node = onnx.helper.make_node("Pow", inputs=["x", "y"], outputs=["z"], name=op_name)
x = np.arange(60).reshape(3, 4, 5).astype(np.float32)
y = np.random.randn(3, 4, 5).astype(np.float32)
z = pow(x, y)
op_expect(node, inputs=[x, y], outputs=[z], op_type=op_type, op_name=op_name)
op_name = "test_pow_bcast_scalar"
node = onnx.helper.make_node("Pow", inputs=["x", "y"], outputs=["z"], name=op_name)
x = np.array([1, 2, 3]).astype(np.float32)
y = np.array([2]).astype(np.float32)
z = pow(x, y) # expected output [1., 4., 9.]
op_expect(node, inputs=[x, y], outputs=[z], op_type=op_type, op_name=op_name)
op_name = "test_pow_bcast_array"
node = onnx.helper.make_node("Pow", inputs=["x", "y"], outputs=["z"], name=op_name)
x = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
y = np.array([[1, 2, 3]]).astype(np.float32)
# expected output [[1, 4, 27], [4, 25, 216]]
z = pow(x, y)
op_expect(node, inputs=[x, y], outputs=[z], op_type=op_type, op_name=op_name)
def test_reciprocal():
op_name, op_type = "test_reciprocal_example", "Reciprocal"
node = onnx.helper.make_node(
"Reciprocal", inputs=["x"], outputs=["y"], name=op_name
)
x = np.array([-4, 2]).astype(np.float32)
y = np.reciprocal(x) # expected output [-0.25, 0.5],
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "test_reciprocal"
node = onnx.helper.make_node(
"Reciprocal", inputs=["x"], outputs=["y"], name=op_name
)
x = np.random.rand(3, 4, 5).astype(np.float32) + 0.5
y = np.reciprocal(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_reducel1():
op_name, op_type = "test_reduce_l1_default_axes_keepdims_example", "ReduceL1"
shape = [3, 2, 2]
axes = None
keepdims = 1
node = onnx.helper.make_node(
"ReduceL1",
inputs=["data"],
outputs=["reduced"],
keepdims=keepdims,
name=op_name,
)
data = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)
# print(data)
# [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]
reduced = np.sum(a=np.abs(data), axis=axes, keepdims=keepdims == 1)
# print(reduced)
# [[[78.]]]
op_expect(node, inputs=[data], outputs=[reduced], op_type=op_type, op_name=op_name)
np.random.seed(0)
data = np.random.uniform(-10, 10, shape).astype(np.float32)
reduced = np.sum(a=np.abs(data), axis=axes, keepdims=keepdims == 1)
op_name = "test_reduce_l1_default_axes_keepdims_random"
node = onnx.helper.make_node(
"ReduceL1",
inputs=["data"],
outputs=["reduced"],
keepdims=keepdims,
name=op_name,
)
op_expect(node, inputs=[data], outputs=[reduced], op_type=op_type, op_name=op_name)
def test_reducel2():
op_name, op_type = "test_reduce_l2_default_axes_keepdims_example", "ReduceL2"
shape = [3, 2, 2]
axes = None
keepdims = 1
node = onnx.helper.make_node(
"ReduceL2",
inputs=["data"],
outputs=["reduced"],
keepdims=keepdims,
name=op_name,
)
data = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)
# print(data)
# [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]
reduced = np.sqrt(np.sum(a=np.square(data), axis=axes, keepdims=keepdims == 1))
# print(reduced)
# [[[25.49509757]]]
op_expect(node, inputs=[data], outputs=[reduced], op_type=op_type, op_name=op_name)
op_name = "test_reduce_l2_default_axes_keepdims_random"
np.random.seed(0)
data = np.random.uniform(-10, 10, shape).astype(np.float32)
reduced = np.sqrt(np.sum(a=np.square(data), axis=axes, keepdims=keepdims == 1))
node = onnx.helper.make_node(
"ReduceL2",
inputs=["data"],
outputs=["reduced"],
keepdims=keepdims,
name=op_name,
)
op_expect(node, inputs=[data], outputs=[reduced], op_type=op_type, op_name=op_name)
def test_reducelogsum():
op_name, op_type = "test_reduce_log_sum_default", "ReduceLogSum"
node = onnx.helper.make_node(
"ReduceLogSum", inputs=["data"], outputs=["reduced"], name=op_name
)
data = np.random.ranf([3, 4, 5]).astype(np.float32)
reduced = np.log(np.sum(data, keepdims=True))
op_expect(node, inputs=[data], outputs=[reduced], op_type=op_type, op_name=op_name)
op_name = "test_reduce_log_sum_negative_axes"
node = onnx.helper.make_node(
"ReduceLogSum", inputs=["data"], outputs=["reduced"], axes=[-2], name=op_name
)
data = np.random.ranf([3, 4, 5]).astype(np.float32)
reduced = np.log(np.sum(data, axis=(-2), keepdims=True))
# print(reduced)
op_expect(node, inputs=[data], outputs=[reduced], op_type=op_type, op_name=op_name)
op_name = "test_reduce_log_sum_desc_axes"
node = onnx.helper.make_node(
"ReduceLogSum",
inputs=["data"],
outputs=["reduced"],
axes=[2, 1],
keepdims=0,
name=op_name,
)
data = np.random.ranf([3, 4, 5]).astype(np.float32)
reduced = np.log(np.sum(data, axis=(2, 1), keepdims=False))
op_expect(node, inputs=[data], outputs=[reduced], op_type=op_type, op_name=op_name)
op_name = "test_reduce_log_sum_asc_axes"
node = onnx.helper.make_node(
"ReduceLogSum",
inputs=["data"],
outputs=["reduced"],
axes=[0, 1],
keepdims=0,
name=op_name,
)
data = np.random.ranf([3, 4, 5]).astype(np.float32)
reduced = np.log(np.sum(data, axis=(0, 1), keepdims=False))
op_expect(node, inputs=[data], outputs=[reduced], op_type=op_type, op_name=op_name)
def test_reducelogsumexp():
op_name, op_type = (
"test_reduce_log_sum_exp_default_axes_keepdims_example",
"ReduceLogSumExp",
)
shape = [3, 2, 2]
axes = None
keepdims = 1
node = onnx.helper.make_node(
"ReduceLogSumExp",
inputs=["data"],
outputs=["reduced"],
keepdims=keepdims,
name=op_name,
)
data = np.array(
[[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32
)
reduced = np.log(np.sum(np.exp(data), axis=axes, keepdims=keepdims == 1))
# print(reduced)
# [[[60.00671387]]]
op_expect(node, inputs=[data], outputs=[reduced], op_type=op_type, op_name=op_name)
op_name = "test_reduce_log_sum_exp_default_axes_keepdims_random"
node = onnx.helper.make_node(
"ReduceLogSumExp",
inputs=["data"],
outputs=["reduced"],
keepdims=keepdims,
name=op_name,
)
np.random.seed(0)
data = np.random.uniform(-10, 10, shape).astype(np.float32)
reduced = np.log(np.sum(np.exp(data), axis=axes, keepdims=keepdims == 1))
op_expect(node, inputs=[data], outputs=[reduced], op_type=op_type, op_name=op_name)
def test_reducemax():
op_name, op_type = "test_reduce_max_default_axes_keepdim_example", "ReduceMax"
shape = [3, 2, 2]
axes = None
keepdims = 1
node = onnx.helper.make_node(
"ReduceMax",
inputs=["data"],
outputs=["reduced"],
keepdims=keepdims,
name=op_name,
)
data = np.array(
[[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32
)
reduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)
# print(reduced)
# [[[60.]]]
op_expect(node, inputs=[data], outputs=[reduced], op_type=op_type, op_name=op_name)
op_name = "test_reduce_max_default_axes_keepdims_random"
node = onnx.helper.make_node(
"ReduceMax",
inputs=["data"],
outputs=["reduced"],
keepdims=keepdims,
name=op_name,
)
np.random.seed(0)
data = np.random.uniform(-10, 10, shape).astype(np.float32)
reduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)
op_expect(node, inputs=[data], outputs=[reduced], op_type=op_type, op_name=op_name)
def test_reducemean():
op_name, op_type = "test_reduce_mean_default_axes_keepdims_example", "ReduceMean"
shape = [3, 2, 2]
axes = None
keepdims = 1
node = onnx.helper.make_node(
"ReduceMean",
inputs=["data"],
outputs=["reduced"],
keepdims=keepdims,
name=op_name,
)
data = np.array(
[[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32
)
reduced = np.mean(data, axis=axes, keepdims=keepdims == 1)
# print(reduced)
# [[[18.25]]]
op_expect(node, inputs=[data], outputs=[reduced], op_type=op_type, op_name=op_name)
op_name = "test_reduce_mean_default_axes_keepdims_random"
node = onnx.helper.make_node(
"ReduceMean",
inputs=["data"],
outputs=["reduced"],
keepdims=keepdims,
name=op_name,
)
np.random.seed(0)
data = np.random.uniform(-10, 10, shape).astype(np.float32)
reduced = np.mean(data, axis=axes, keepdims=keepdims == 1)
op_expect(node, inputs=[data], outputs=[reduced], op_type=op_type, op_name=op_name)
def test_reducesum():
batch_size = 32
op_name = "reduce_sum_1"
with tf.Graph().as_default():
input_ph = tf.placeholder(
dtype=tf.float32, shape=[batch_size, 256], name="input"
) # [batchsize, 10]
input_data = np.random.rand(batch_size, 256).astype(np.float32)
x = tf.math.reduce_sum(input_ph, axis=1, name=op_name)
_ = tf.identity(x, name="output")
verify_tf_with_trt_result(
[input_data], ["input:0"], ["output:0"], op_name=op_name
)
def test_maxunpool():
def verify_maxunpool(
data, indices, kernel_shape, strides, output_shape=None, pads=None, op_name=None
):
input_names = ["xT", "xI"]
input_info = [
helper.make_tensor_value_info("xT", TensorProto.FLOAT, list(data.shape)),
helper.make_tensor_value_info("xI", TensorProto.INT64, list(indices.shape)),
]
input_values = [data, indices]
# input_values = [data ]
if output_shape is not None:
input_names.append("output_shape")
input_info.append(
helper.make_tensor_value_info(
"output_shape", TensorProto.INT64, list(output_shape.shape)
)
)
input_values.append(output_shape)
else:
# Compute expected output shape
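            # For each spatial dim: out = (in - 1) * stride + kernel, minus the
            # summed pads for that dim when pads are given (as computed below).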
output_shape = np.asarray(([1, 1] + list(strides))) * np.asarray(
list(data.shape)
)
output_shape += np.asarray(([0, 0] + list(kernel_shape))) - np.asarray(
([0, 0] + list(strides))
)
if pads is not None:
output_shape -= np.asarray(
[0, 0] + list(np.sum(np.reshape(list(pads), [-1, 2]), axis=-1))
)
output_shape = [int(i) for i in output_shape]
node = helper.make_node(
"MaxUnpool",
inputs=input_names,
outputs=["y"],
kernel_shape=kernel_shape,
name=op_name,
)
if pads is not None:
pad_attr = helper.make_attribute("pads", pads)
node.attribute.append(pad_attr)
if strides is not None:
strides_attr = helper.make_attribute("strides", strides)
node.attribute.append(strides_attr)
graph = helper.make_graph(
[node],
"maxunpool_test",
inputs=input_info,
outputs=[
helper.make_tensor_value_info("y", TensorProto.FLOAT, output_shape)
],
)
model = helper.make_model(graph, producer_name="size_test")
verify_with_ort_with_trt(model, input_values, op_name=op_name, opset=11)
    #NOCC:invalid-name(other: onnx example)
xT = np.array([[[[5, 6], [7, 8]]]], dtype=np.float32)
    #NOCC:invalid-name(other: onnx example)
xI = np.array([[[[0, 7], [13, 15]]]], dtype=np.int64)
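    # xI holds flattened (row-major) positions in the 4x4 unpooled output where the
    # pooled values of xT are scattered; all other positions are filled with zeros.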
verify_maxunpool(xT, xI, [2, 2], strides=[2, 2], op_name="max_unpool_1")
def _test_forward_one_hot(
indices_shape, depth, on_value, off_value, axis, out_dtype, op_name
):
inp_array1 = np.random.randint(0, 5, size=indices_shape)
with tf.Graph().as_default():
in1 = tf.placeholder(
shape=inp_array1.shape, dtype=inp_array1.dtype, name="input"
)
out = tf.one_hot(
in1, depth, on_value, off_value, axis, dtype=out_dtype, name=op_name
)
out = tf.identity(out, "output")
verify_tf_with_trt_result([inp_array1], ["input:0"], ["output:0"], op_name)
# compare_tf_with_tvm(inp_array1, in1.name, out.name)
def test_forward_one_hot():
_test_forward_one_hot((3,), 3, 1.0, 0.0, -1, "float32", "onehot_2")
def test_where():
op_name, op_type = "test_where", "Where"
node = onnx.helper.make_node(
"Where", inputs=["condition", "x", "y"], outputs=["z"], name=op_name
)
condition = np.array([[1, 0], [1, 1]], dtype=bool)
x = np.array([[1, 2], [3, 4]], dtype=np.float32)
y = np.array([[9, 8], [7, 6]], dtype=np.float32)
z = np.where(condition, x, y) # expected output [[1, 8], [3, 4]]
op_expect(
node, inputs=[condition, x, y], outputs=[z], op_type=op_type, op_name=op_name
)
def _test_slice_iteration_v1(indata, outdata, starts, ends, axes=None):
op_name = "slice_0"
if axes:
y = helper.make_node(
"Slice", ["in"], ["out"], axes=axes, starts=starts, ends=ends, name=op_name
)
else:
y = helper.make_node(
"Slice", ["in"], ["out"], starts=starts, ends=ends, name=op_name
)
graph = helper.make_graph(
[y],
"slice_test",
inputs=[
helper.make_tensor_value_info("in", TensorProto.FLOAT, list(indata.shape))
],
outputs=[
helper.make_tensor_value_info("out", TensorProto.FLOAT, list(outdata.shape))
],
)
model = helper.make_model(graph, producer_name="slice_test")
# verify_with_ort_with_trt(model, [indata], [outdata.shape], op_name=op_name, opset=1)
verify_with_ort_with_trt(model, [indata], op_name=op_name, opset=1)
def test_slice():
x = np.random.randn(20, 10, 5).astype(np.float32)
_test_slice_iteration_v1(x, x[0:3, 0:10], starts=(0, 0), ends=(3, 10), axes=(0, 1))
def verify_pad_v11(indata, pads, mode="constant", value=0.0):
op_name = "pad_001"
indata = np.array(indata).astype(np.float32)
    # numpy expected result
len_dim = len(pads) // 2
np_pads = [(pads[i], pads[i + len_dim]) for i in range(len_dim)]
pads = np.array(pads)
# onnx graph
if mode in ["edge", "reflect"]:
inputs = [indata]
        outdata = np.pad(indata, pad_width=np_pads, mode=mode)
import logging
import mlflow
import numpy as np
import GPyOpt
import GPy
#from numpy.random import seed
from GPyOpt.models.gpmodel import GPModel
from GPyOpt.acquisitions import AcquisitionLCB
import networkx as nx
import collections
from myBOModular import MyBOModular
from myGPModel import MyGPModel
from GPyOpt.core.task.space import Design_space
from common import Config
import random
import os
import pickle
# CLEAN UP?
from function_optimizer import GraphOverlap, GraphNonOverlap, Tree, GraphFunction, OptimalGraphFunction
from exceptions import EarlyTerminationException
def normalize(v):
    norm = np.linalg.norm(v, ord=1)
    if norm == 0:
        norm = np.finfo(v.dtype).eps
    return v / norm
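# Example: normalize(np.array([1.0, 3.0])) -> array([0.25, 0.75]); an all-zero
# vector is divided by machine epsilon instead of zero.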
from datasets import ComponentFunction, SyntheticComponentFunction
import function_optimizer
class MetaLoader(type):
registry = {}
loader_ids = []
def __new__(cls, cls_name, bases, attrs):
new_class = super(cls, MetaLoader).__new__(cls, cls_name, bases, attrs)
MetaLoader.registry[cls_name] = new_class
MetaLoader.loader_ids.append(cls_name)
return new_class
@staticmethod
def get_loader_constructor(loader_id):
logging.info("Load algorithm loader[%s].", loader_id)
return MetaLoader.registry[loader_id]
class Algorithm(type, metaclass=MetaLoader):
registry = {}
algorithm_ids = []
def __new__(cls, cls_name, bases, attrs):
new_class = super(cls, Algorithm).__new__(cls, cls_name, bases, attrs)
Algorithm.registry[cls_name] = new_class
Algorithm.algorithm_ids.append(cls_name)
return new_class
@staticmethod
def get_constructor(algorithm_id):
logging.info("Using algorithm with algorithm_id[%s].", algorithm_id)
return Algorithm.registry[algorithm_id]
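# Example registry lookup (class names here are hypothetical, for illustration only;
# any class built with these metaclasses is registered under its own class name):
#   loader_cls = MetaLoader.get_loader_constructor("MyLoader")
#   algo_cls = Algorithm.get_constructor("MyAlgorithm")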
from febo.models.gp import GPConfig
from febo.controller.simple import SimpleControllerConfig
from febo.environment.benchmarks import BenchmarkEnvironmentConfig
from febo.solvers.candidate import GridSolverConfig
from febo.algorithms.rembo import RemboConfig
from febo.models.model import ModelConfig
from febo.environment.benchmarks import BenchmarkEnvironment
from febo.environment import DiscreteDomain, ContinuousDomain
from febo.controller import SimpleController
import febo
class AdaptorBenchmark(BenchmarkEnvironment):
def __init__(self, fn):
super().__init__(path=None)
self.fn = fn
self.mlflow_logging = self.fn.mlflow_logging
dim = self.fn.domain.dimension
L = []
U = []
# Number of points per dimension
n_points = []
# Go through each domain of the dimension and find the l and u
for d in self.fn.domain.combined_domain:
L.append(np.min(d))
U.append(np.max(d))
n_points.append(len(d))
self._domain = ContinuousDomain(np.array(L), np.array(U))
#GridSolverConfig.points_per_dimension = np.max(n_points)
RemboConfig.emb_d = self.fn.get_emb_dim()
# ??
#self._domain = DiscreteDomain(np.array([[0.0, 0.0], [1.0, 1.0]]))
self._max_value = -self.mlflow_logging.y_opt
def f(self, x):
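        # The febo environment maximises f, while the wrapped fn appears to be a
        # minimisation objective, hence the sign flip (consistent with
        # _max_value = -y_opt above).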
return np.float64(-self.fn(np.array([x])))
class GADDUCBAlgorithm(object):
def __init__(self, n_iter, algorithm_random_seed, n_rand, algoID="", fn=None, **kwargs):
self.algoID = algoID
self.n_iter = n_iter
self.domain = fn.domain
self.fn = fn
self.algorithm_random_seed = algorithm_random_seed
self.n_rand = n_rand
        # Use the same random seed everywhere:
        # generating the initial design depends on the random-seed setting.
np.random.seed(algorithm_random_seed)
random.seed(algorithm_random_seed)
self.rs = np.random.RandomState(algorithm_random_seed)
self.initial_design = self.domain.random_X(self.rs, n_rand)
def get_algorithm_id(self):
return self.__class__.__name__ + self.algoID
def run(self):
raise NotImplementedError
from boattack.utilities.utilities import get_init_data
from boattack.bayesopt import Bayes_opt
from boattack.utilities.upsampler import upsample_projection
class BattackAlgorithm(GADDUCBAlgorithm):
def __init__(self, fn, model_type, acq_type, sparse='None', nsubspaces=1, batch_size=None, update_freq=None, noise_var=None, exploration_weight=None,
grid_size=None, **kwargs):
GADDUCBAlgorithm.__init__(self, fn=fn, **kwargs)
#x_init, y_init = get_init_data(obj_func=fn.f_adapted, n_init=self.n_rand, bounds=fn.x_bounds_adapted)
beta = exploration_weight
obj_func = self.fn.obj_func
nchannel = self.fn.nchannel
high_dim = self.fn.high_dim
low_dim = self.fn.low_dim
dim_reduction = self.fn.dim_reduction
results_file_name = fn.results_file_name
failed_file_name = fn.failed_file_name
logging.info(f"Results file={results_file_name}")
logging.info(f"Failed file={failed_file_name}")
X_opt_all_slices = []
Y_opt_all_slices = []
X_query_all_slices = []
Y_query_all_slices = []
X_reduced_opt_all_slices = []
X_reduced_query_all_slices = []
# Generate initial observation data for BO
if os.path.exists(results_file_name) and 'LDR' not in model_type:
logging.info('load old init data')
with open(results_file_name, 'rb') as pre_file:
previous_bo_results = pickle.load(pre_file)
x_init = previous_bo_results['X_reduced_query'][0]
y_init = previous_bo_results['Y_query'][0]
else:
logging.info('generate new init data')
# There are some significant problems with a discrete domain.
try:
#x_init, y_init = get_init_data(obj_func=fn, n_init=self.n_rand, bounds=fn.x_bounds_adapted)
# There is some strange sampling that they are doing...
x_init = self.initial_design
y_init = self.fn(x_init)
except EarlyTerminationException as e:
# Failed on init, so we fix the init problem
fn.mlflow_logging.log_battack(int(True), fn.cnn.target_label[0])
fn.mlflow_logging.log_init_y(np.min(e.metrics['y']))
while fn.mlflow_logging.t_y < self.n_iter:
fn.mlflow_logging.log_cost_ba()
fn.mlflow_logging.log_battack(int(True), fn.cnn.target_label[0])
fn.mlflow_logging.log_y(e.metrics['y'])
return
#x_init, y_init = get_init_data(obj_func=f, n_init=n_init, bounds=x_bounds)
#x_init, y_init = get_init_data(obj_func=fn.f_adapted, n_init=self.n_rand, bounds=fn.x_bounds_adapted)
logging.info(f'X init shape {x_init.shape}')
# Initialise BO
#bayes_opt = Bayes_opt(func=f, bounds=x_bounds, saving_path=failed_file_name)
bayes_opt = Bayes_opt(func=fn, bounds=fn.x_bounds_adapted, saving_path=failed_file_name, noise_var=noise_var)
bayes_opt.initialise(X_init=x_init, Y_init=y_init, model_type=model_type, acq_type=acq_type,
sparse=sparse, nsubspaces=nsubspaces, batch_size=batch_size, update_freq=update_freq,
nchannel=nchannel, high_dim=high_dim, dim_reduction=dim_reduction,
cost_metric=None, seed=self.algorithm_random_seed, beta=beta, gridSize=grid_size)
# Run BO
logging.info("Run bayes_opt")
X_query_full, Y_query, X_opt_full, Y_opt, time_record = bayes_opt.run(total_iterations=self.n_iter)
# Reduce the memory needed for storing results
if 'LDR' in model_type:
X_query = X_query_full[-2:]
X_opt = X_opt_full[-2:]
else:
X_query = X_query_full
X_opt = X_opt_full[-2:]
# Store the results
Y_opt_all_slices.append(Y_opt)
Y_query_all_slices.append(Y_query)
opt_dr_list = bayes_opt.opt_dr_list
if dim_reduction == 'NONE':
X_reduced_opt_all_slices.append(X_opt.astype(np.float16))
X_reduced_query_all_slices.append(X_query.astype(np.float16))
X_query_all_slices.append(X_query)
X_opt_all_slices.append(X_opt)
logging.info(f'Y_opt={Y_opt[-1]}, X_opt shape{X_opt.shape}, X_h_opt shape{X_opt.shape}, '
f'X_query shape{X_query.shape}, X_h_query shape{X_query.shape}, opt_dr={opt_dr_list[-1]}')
else:
X_reduced_opt_all_slices.append(X_opt.astype(np.float16))
X_reduced_query_all_slices.append(X_query.astype(np.float16))
# Transform data from reduced search space to original high-dimensional input space
X_h_query = upsample_projection(dim_reduction, X_query, low_dim=low_dim, high_dim=high_dim,
nchannel=nchannel)
X_query_all_slices.append(X_h_query)
X_h_opt = upsample_projection(dim_reduction, X_opt, low_dim=low_dim, high_dim=high_dim,
nchannel=nchannel)
X_opt_all_slices.append(X_h_opt)
logging.info(f'Y_opt={Y_opt[-1]}, X_opt shape{X_opt.shape}, X_h_opt shape{X_h_opt.shape}, '
f'X_query shape{X_query.shape}, X_h_query shape{X_h_query.shape}')
# For ImageNet images, save only the L_inf norm and L2 norm instead of the adversarial image
if 'imagenet' in obj_func:
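# NOTE: `epsilon` below is not defined in this snippet; it is assumed to be the
# attack perturbation bound supplied by the surrounding boattack configuration.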
l_inf_sum = np.abs(X_h_opt[-1, :]).sum()
l_2_norm = np.sqrt(np.sum((epsilon * X_h_opt[-1, :].ravel()) ** 2))
X_opt_all_slices = [l_inf_sum]
X_query_all_slices = [l_2_norm]
# Save the results locally
results = {'X_opt': X_opt_all_slices,
'Y_opt': Y_opt_all_slices,
'X_query': X_query_all_slices,
'Y_query': Y_query_all_slices,
'X_reduced_opt': X_reduced_opt_all_slices,
'X_reduced_query': X_reduced_query_all_slices,
'dr_opt_list': opt_dr_list,
'runtime': time_record}
with open(results_file_name, 'wb') as file:
pickle.dump(results, file)
def run(self):
logging.info("RUN")
def FEBO_Algorithm_Cls(self):
raise NotImplementedError
class BoAttack(BattackAlgorithm, metaclass=Algorithm):
def FEBO_Algorithm_Cls(self):
return febo.algorithms.Random
class FEBOAlgorithm(GADDUCBAlgorithm):
def __init__(self, initial_kernel_params=None, noise_var=None, **kwargs):
GADDUCBAlgorithm.__init__(self, **kwargs)
# Config the FEBO domains
GPConfig.noise_var = noise_var
# Default is RBF
if 'gpy_kernel' not in initial_kernel_params:
initial_kernel_params['gpy_kernel'] = 'GPy.kern.RBF'
GPConfig.kernels = [(initial_kernel_params['gpy_kernel'], {'variance': initial_kernel_params['variance'], 'lengthscale': initial_kernel_params['lengthscale'] , 'ARD': True})]
SimpleControllerConfig.T = self.n_iter
SimpleControllerConfig.best_predicted_every = 1
self.linebo_env = AdaptorBenchmark(self.fn)
_data = []
for x in self.initial_design:
y = self.fn(np.array([x]))
evaluation = np.empty(shape=(), dtype=self.linebo_env.dtype)
evaluation["x"] = x
evaluation["y"] = -y
evaluation["y_exact"] = -y
evaluation["y_max"] = self.linebo_env._max_value
_data.append(evaluation)
self.initial_data = _data
# Attempt to return f instead of y if that exist
self.fn.mlflow_logging.log_init_y(np.min(self.fn.history_y))
def run(self):
# Setup
s = None
try:
FEBO_Algo = self.FEBO_Algorithm_Cls()
s = AdaptorController(fn=self.fn, algorithm=FEBO_Algo(), environment=self.linebo_env)
s.initialize(algo_kwargs = dict(initial_data=self.initial_data))
s.run()
except Exception as e:
logging.exception("Exception")
finally:
if s:
s.finalize()
def FEBO_Algorithm_Cls(self):
raise NotImplementedError
class FEBO_Random(FEBOAlgorithm, metaclass=Algorithm):
def FEBO_Algorithm_Cls(self):
return febo.algorithms.Random
class NelderMead(FEBOAlgorithm, metaclass=Algorithm):
def FEBO_Algorithm_Cls(self):
return febo.algorithms.NelderMead
class RandomLineBO(FEBOAlgorithm, metaclass=Algorithm):
def FEBO_Algorithm_Cls(self):
return febo.algorithms.RandomLineBO
class CoordinateLineBO(FEBOAlgorithm, metaclass=Algorithm):
def FEBO_Algorithm_Cls(self):
return febo.algorithms.CoordinateLineBO
class AscentLineBO(FEBOAlgorithm, metaclass=Algorithm):
def FEBO_Algorithm_Cls(self):
return febo.algorithms.AscentLineBO
class UCB(FEBOAlgorithm, metaclass=Algorithm):
def FEBO_Algorithm_Cls(self):
return febo.algorithms.UCB
class Rembo(FEBOAlgorithm, metaclass=Algorithm):
def FEBO_Algorithm_Cls(self):
from febo.algorithms.rembo import Rembo
return Rembo
class InterleavedRembo(FEBOAlgorithm, metaclass=Algorithm):
def FEBO_Algorithm_Cls(self):
from febo.algorithms.rembo import InterleavedRembo
return InterleavedRembo
class AdaptorController(SimpleController):
def __init__(self, fn, *args, **kwargs):
super(AdaptorController, self).__init__(*args, **kwargs)
self.fn = fn
def run(self):
logging.info(f"Starting optimization: {self.algorithm.name}")
# interaction loop
while not self._exit:
self._run_step()
evaluation = self._data[-1]
self.fn.mlflow_logging.log_y(np.min(self.fn.history_y[-1]))
# Random algorithm
class Random(GADDUCBAlgorithm, metaclass=Algorithm):
def __init__(self, **kwargs):
GADDUCBAlgorithm.__init__(self, **kwargs)
self.mlflow_logging = self.fn.mlflow_logging
def run(self):
f = self.fn
initial_design = self.initial_design
n_iter = self.n_iter
initial_design_iter = self.domain.random_X(self.rs, n_iter)
Y = []
Y_best = []
X_rand = []
y_best = np.inf
for x in initial_design:
y = f(np.array([x]))
Y.append(y)
if y < y_best:
y_best = y
Y_best.append(y_best)
X_rand.append(x)
self.mlflow_logging.log_init_y(np.min(self.fn.history_y))
for x in initial_design_iter:
y = f(np.array([x]))
import numpy as np
import healpy as hp
from BurstCube.LocSim.Spacecraft import Spacecraft
class Mission(Spacecraft):
"""This is a mission object that is a child of the BurstCube
Spacecraft Object. It's got some extra features like effective area
and you can set up a specific real-world spacecraft when you initialize
it. This is useful for a lot of the requirement sims."""
Missions = ('Bia', 'GBM', 'Fermi', 'HAM',
'Nimble', 'BATSE', 'BurstCube')
def __init__(self, mission, lat=0., lon=np.radians(260.),
antiEarth=False, Earth=True, NSIDE=32, fov=60.,
ea_dir=''):
"""Detector setup for various missions.
Parameters
----------
mission : str
string with the name of a specific mission.
lat : float
the latitude of the mission in radians (default = 0).
lon : float
the longitude of the mission in radians (default = radians(260)).
Returns
---------
"""
self.mission = mission
self.antiEarth = antiEarth
self.Earth = Earth
self.NSIDE = NSIDE
self.fov = fov
self.ea_dir = ea_dir
if self.mission not in self.Missions:
print('No such mission', mission)
return
if mission == 'Bia':
pointings = {'01': ('30:0:0', '55:0:0'),
'02': ('90:0:0', '55:0:0'),
'03': ('150:0:0', '55:0:0'),
'04': ('210:0:0', '55:0:0'),
'05': ('270:0:0', '55:0:0'),
'06': ('330:0:0', '55:0:0'),
'07': ('0:0:0', '0:0:0')}
self.cosindex = 0.6
self.Aeff = 320. # cm2
if((mission == 'GBM') or (mission == 'Fermi')):
pointings = {'01': ('45:54:0', '20:36:0'),
'02': ('45:6:0', '45:18:0'),
'03': ('58:24:0', '90:12:0'),
'04': ('314:54:0', '45:12:0'),
'05': ('303:12:0', '90:18:0'),
'06': ('3:24:0', '89:48:0'),
'07': ('224:54:0', '20:24:0'),
'08': ('224:36:0', '46:12:0'),
'09': ('236:36:0', '90:0:0'),
'10': ('135:12:0', '45:36:0'),
'11': ('123:42:0', '90:24:0'),
'12': ('183:42:0', '90:18:0')}
self.Aeff = 132.
self.cosindex = 0.78
lat = np.radians(50.)
lon = np.radians(260.)
if ((mission == 'HAM') or (mission == 'Nimble')):
ang = 45.
pointings = {'01': ('60:00:00', str(ang)+':0:0'),
'02': ('120:00:00', str(ang)+':0:0'),
'03': ('180:00:00', str(ang)+':0:0'),
'04': ('240:00:00', str(ang)+':0:0'),
'05': ('300:00:00', str(ang)+':0:0'),
'06': ('00:00:00', str(ang)+':0:0'),
'07': ('00:00:00', '00:00:00')}
self.Aeff = 132 # cm2
self.cosindex = 0.78
if mission == 'BATSE':
ang = 45
pointings = {'01': ('0:0:0', str(ang)+':0:0'),
'02': ('90:0:0', str(ang)+':0:0'),
'03': ('180:0:0', str(ang)+':0:0'),
'04': ('270:0:0', str(ang)+':0:0'),
'05': ('0:0:0', str(ang+90)+':0:0'),
'06': ('90:0:0', str(ang+90)+':0:0'),
'07': ('180:0:0', str(ang+90)+':0:0'),
'08': ('270:0:0', str(ang+90)+':0:0')}
self.cosindex = 1.0
self.Aeff = 1500.
if mission == 'BurstCube':
pointings = {'01': ('0:0:0', '45:0:0'),
'02': ('90:0:0', '45:0:0'),
'03': ('180:0:0', '45:0:0'),
'04': ('270:0:0', '45:0:0')}
self.Aeff = 61.
self.cosindex = 0.6
super().__init__(pointings, lat, lon)
def loadAeff(self):
"""Load the full effictive areas. Only have BurstCube and GBM now."""
from pkg_resources import resource_filename
from astropy.io import ascii as as_ascii
filenames = {'BurstCube': 'BC_eff_area_curves.ecsv',
'GBM': 'gbm_effective_area.dat',
'Fermi': 'gbm_effective_area.dat'}
ea_file = filenames[self.mission]
if self.ea_dir == '':
fname = resource_filename('BurstCube',
'data/' + ea_file)
else:
fname = self.ea_dir+ea_file
if self.mission == 'BurstCube':
bcaeffs = as_ascii.read(fname, format='ecsv')
w = np.where((bcaeffs['diameter'] == 90)
& (bcaeffs['height'] == 19))
aeff = bcaeffs[w]
elif (self.mission == 'GBM') or (self.mission == 'Fermi'):
aeff = np.genfromtxt(fname, skip_header=2,
names=('energy', 'aeff'))
else:
raise AttributeError("No such Mission")
self.Aeff_full = aeff
def calcExposures(self):
"""Short descrtiption of this function.
Parameters
----------
Earth : bool
Unknown
antiEarth : bool
Unknown
NSIDE : int
Resolution of the healpix map
Returns
---------
"""
exposure_positions_hp = np.arange(hp.nside2npix(self.NSIDE))
exposure_positions_pix = hp.pix2ang(self.NSIDE, exposure_positions_hp,
lonlat=True)
self.exposure_positions = np.vstack(exposure_positions_pix)
self.exposures = np.array([[detector.exposure(position[0], position[1],
alt=-90.,
index=self.cosindex)
for position in self.exposure_positions.T]
for detector in self.detectors])
exps = self.exposures.sum(axis=0)*self.Aeff
self.fs = exps # -min(gbm_exps))/max(gbm_exps)
if self.Earth:
vec = hp.ang2vec(180, 0, lonlat=True)
i = hp.query_disc(self.NSIDE, vec, 67*np.pi/180.)
self.fs[i] = 0
self.exposures[:, i] = 0
if self.antiEarth:
vec = hp.ang2vec(np.degrees(self.lon)-260.+180., 0, lonlat=True)
i = hp.query_disc(self.NSIDE, vec, 67*np.pi/180.)
self.fs[i] = 0
self.exposures[:, i] = 0
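# A minimal usage sketch (assuming the BurstCube package data is installed):
#   m = Mission('BurstCube')
#   m.loadAeff()        # optional: full effective-area curves
#   m.calcExposures()   # fills m.exposures and m.fs on the healpix grid
#   m.plotExposures()   # mollweide maps, one panel per detector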
def plotExposures(self):
"""Plots the exposures on the sky."""
import matplotlib.pylab as plot
npointings = len(self.pointings)
plot.figure(figsize=(20, npointings))
s = np.argsort(self.pointings.keys())
for j in range(npointings):
i = s[j]
hp.mollview(self.exposures[i]/max(self.exposures[i])*self.Aeff,
title='Detector ',
sub=[np.round(npointings/3.+0.5), 3, j+1])  # grid layout after np.round reconstructed (assumed 3-column grid)
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.autograd import Variable
from torch import optim
import numpy as np
import math, random
# Generating a noisy multi-sin wave
def sine_2(X, signal_freq=60.):
return (np.sin(2 * np.pi * (X) / signal_freq) + np.sin(4 * np.pi * (X) / signal_freq)) / 2.0
def noisy(Y, noise_range=(-0.05, 0.05)):
noise = np.random.uniform(noise_range[0], noise_range[1], size=Y.shape)
return Y + noise
def sample(sample_size):
random_offset = random.randint(0, sample_size)
X = np.arange(sample_size)
Y = noisy(sine_2(X + random_offset))
return Y
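# sample(N) returns a length-N noisy two-component sine wave starting at a
# random phase offset; each call draws a fresh sequence, intended as training
# data for the RNN defined below.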
# Define the model
class SimpleRNN(nn.Module):
def __init__(self, hidden_size):
super(SimpleRNN, self).__init__()
self.hidden_size = hidden_size
self.inp = nn.Linear(1, hidden_size)
self.rnn = nn.LSTM(hidden_size, hidden_size, 2, dropout=0.05)
self.out = nn.Linear(hidden_size, 1)
def step(self, input, hidden=None):
input = self.inp(input.view(1, -1)).unsqueeze(1)
output, hidden = self.rnn(input, hidden)
output = self.out(output.squeeze(1))
return output, hidden
def forward(self, inputs, hidden=None, force=True, steps=0):
if force or steps == 0: steps = len(inputs)
outputs = Variable(torch.zeros(steps, 1, 1))
for i in range(steps):
if force or i == 0:
input = inputs[i]
else:
input = output
output, hidden = self.step(input, hidden)
outputs[i] = output
return outputs, hidden
n_epochs = 100
n_iters = 50
hidden_size = 10
model = SimpleRNN(hidden_size)
criterion = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=0.01)
losses = np.zeros(n_epochs)
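# The original file ends here. A minimal training-loop sketch (not part of the
# original source) might look like the following, assuming PyTorch >= 0.4 and
# that `sample` above generates one fresh next-step-prediction sequence per
# epoch of length n_iters:
for epoch in range(n_epochs):
    seq = sample(n_iters + 1).astype('float32')    # one fresh noisy sine sequence
    inputs = Variable(torch.from_numpy(seq[:-1]))  # steps 0 .. T-1
    targets = Variable(torch.from_numpy(seq[1:]))  # steps 1 .. T (next-step targets)
    outputs, hidden = model(inputs)
    optimizer.zero_grad()
    loss = criterion(outputs.view(-1), targets)
    loss.backward()
    optimizer.step()
    losses[epoch] = loss.item()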
# -*- coding: utf-8 -*-
r"""Define an instrument for resolution calculations
"""
import numpy as np
from scipy.linalg import block_diag as blkdiag
from ..crystal import Sample
from ..energy import Energy
from .analyzer import Analyzer
from .exceptions import ScatteringTriangleError
from .general import GeneralInstrument
from .monochromator import Monochromator
from .plot import PlotInstrument
from .tools import GetTau, _CleanArgs, _Dummy, _modvec, _scalar, _star, _voigt
class TripleAxisInstrument(GeneralInstrument, PlotInstrument):
u"""An object that represents a Triple Axis Spectrometer (TAS) instrument
experimental configuration, including a sample.
Parameters
----------
efixed : float, optional
Fixed energy, either ei or ef, depending on the instrument
configuration. Default: 14.7
sample : obj, optional
Sample lattice constants, parameters, mosaic, and orientation
(reciprocal-space orienting vectors). Default: A crystal with
a,b,c = 6,7,8 and alpha,beta,gamma = 90,90,90 and orientation
vectors u=[1 0 0] and v=[0 1 0].
hcol : list(4)
Horizontal Soller collimations in minutes of arc starting from the
neutron guide. Default: [40 40 40 40]
vcol : list(4), optional
Vertical Soller collimations in minutes of arc starting from the
neutron guide. Default: [120 120 120 120]
mono_tau : str or float, optional
The monochromator reciprocal lattice vector in Å\ :sup:`-1`,
given either as a float, or as a string for common monochromator types.
Default: 'PG(002)'
mono_mosaic : float, optional
The mosaic of the monochromator in minutes of arc. Default: 25
ana_tau : str or float, optional
The analyzer reciprocal lattice vector in Å\ :sup:`-1`,
given either as a float, or as a string for common analyzer types.
Default: 'PG(002)'
ana_mosaic : float, optional
The mosaic of the monochromator in minutes of arc. Default: 25
Attributes
----------
method
moncor
mono
ana
hcol
vcol
arms
efixed
sample
orient1
orient2
infin
beam
detector
monitor
Smooth
guide
description_string
Methods
-------
calc_resolution
calc_resolution_in_Q_coords
calc_projections
get_angles_and_Q
get_lattice
get_resolution_params
get_resolution
plot_projections
plot_ellipsoid
plot_instrument
resolution_convolution
resolution_convolution_SMA
plot_slice
"""
def __init__(self, efixed=14.7, sample=None, hcol=None, vcol=None, mono='PG(002)',
mono_mosaic=25, ana='PG(002)', ana_mosaic=25, **kwargs):
if sample is None:
sample = Sample(6, 7, 8, 90, 90, 90)
sample.u = [1, 0, 0]
sample.v = [0, 1, 0]
if hcol is None:
hcol = [40, 40, 40, 40]
if vcol is None:
vcol = [120, 120, 120, 120]
self.mono = Monochromator(mono, mono_mosaic)
self.ana = Analyzer(ana, ana_mosaic)
self.hcol = np.array(hcol)
self.vcol = np.array(vcol)
self.efixed = efixed
self.sample = sample
self.orient1 = np.array(sample.u)
self.orient2 = np.array(sample.v)
self.detector = _Dummy('Detector')
self.monitor = _Dummy('Monitor')
self.guide = _Dummy('Guide')
for key, value in kwargs.items():
setattr(self, key, value)
def __repr__(self):
return "Instrument('tas', engine='neutronpy', efixed={0})".format(self.efixed)
def __eq__(self, right):
self_parent_keys = sorted(list(self.__dict__.keys()))
right_parent_keys = sorted(list(right.__dict__.keys()))
if not np.all(self_parent_keys == right_parent_keys):
return False
for key, value in self.__dict__.items():
right_parent_val = getattr(right, key)
if not np.all(value == right_parent_val):
print(value, right_parent_val)
return False
return True
def __ne__(self, right):
return not self.__eq__(right)
@property
def mono(self):
u"""A structure that describes the monochromator.
Attributes
----------
tau : str or float
The monochromator reciprocal lattice vector in Å\ :sup:`-1`.
Instead of a numerical input one can use one of the following
keyword strings:
+------------------+--------------+-----------+
| String | τ | |
+==================+==============+===========+
| Be(002) | 3.50702 | |
+------------------+--------------+-----------+
| Co0.92Fe0.08(200)| 3.54782 | (Heusler) |
+------------------+--------------+-----------+
| Cu(002) | 3.47714 | |
+------------------+--------------+-----------+
| Cu(111) | 2.99913 | |
+------------------+--------------+-----------+
| Cu(220) | 4.91642 | |
+------------------+--------------+-----------+
| Cu2MnAl(111) | 1.82810 | (Heusler) |
+------------------+--------------+-----------+
| Ge(111) | 1.92366 | |
+------------------+--------------+-----------+
| Ge(220) | 3.14131 | |
+------------------+--------------+-----------+
| Ge(311) | 3.68351 | |
+------------------+--------------+-----------+
| Ge(511) | 5.76968 | |
+------------------+--------------+-----------+
| Ge(533) | 7.28063 | |
+------------------+--------------+-----------+
| PG(002) | 1.87325 | |
+------------------+--------------+-----------+
| PG(004) | 3.74650 | |
+------------------+--------------+-----------+
| PG(110) | 5.49806 | |
+------------------+--------------+-----------+
| Si(111) | 2.00421 | |
+------------------+--------------+-----------+
mosaic : int
The monochromator mosaic in minutes of arc.
vmosaic : int
The vertical mosaic of monochromator in minutes of arc. If
this field is left unassigned, an isotropic mosaic is assumed.
dir : int
Direction of the crystal (left or right, -1 or +1, respectively).
Default: -1 (left-handed coordinate frame).
rh : float
Horizontal curvature of the monochromator in cm.
rv : float
Vertical curvature of the monochromator in cm.
"""
return self._mono
@mono.setter
def mono(self, value):
self._mono = value
@property
def ana(self):
u"""A structure that describes the analyzer and contains fields as in
:attr:`mono` plus optional fields.
Attributes
----------
thickness: float
The analyzer thickness in cm for ideal-crystal reflectivity
corrections (Section II C 3). If no reflectivity corrections are to
be made, this field should remain unassigned or set to a negative
value.
Q : float
The kinematic reflectivity coefficient for this correction. It is
given by
.. math:: Q = \\frac{4|F|^2}{V_0} \\frac{(2\\pi)^3}{\\tau^3},
where V0 is the unit cell volume for the analyzer crystal, F is the
structure factor of the analyzer reflection, and τ is the analyzer
reciprocal lattice vector. For PG(002) Q = 0.1287. Leave this field
unassigned or make it negative if you don’t want the correction
done.
horifoc : bool
A flag that is set to 1 if a horizontally focusing analyzer is used
(Section II D). In this case ``hcol[2]`` (see below) is the angular
size of the analyzer, as seen from the sample position. If the
field is unassigned or equal to -1, a flat analyzer is assumed.
Note that this option is only available with the Cooper-Nathans
method.
dir : int
Direction of the crystal (left or right, -1 or +1, respectively).
Default: -1 (left-handed coordinate frame).
rh : float
Horizontal curvature of the analyzer in cm.
rv : float
Vertical curvature of the analyzer in cm.
"""
return self._ana
@ana.setter
def ana(self, value):
self._ana = value
@property
def method(self):
"""Selects the computation method.
If ``method=0`` or left undefined, a Cooper-Nathans calculation is
performed. For a Popovici calculation set ``method=1``.
"""
return self._method
@method.setter
def method(self, value):
self._method = value
@property
def moncor(self):
"""Selects the type of normalization used to calculate ``R0``
If ``moncor=1`` or left undefined, ``R0`` is calculated in
normalization to monitor counts (Section II C 2). 1/k\ :sub:`i` monitor
efficiency correction is included automatically. To normalize ``R0`` to
source flux (Section II C 1), use ``moncor=0``.
"""
return self._moncar
@moncor.setter
def moncor(self, value):
self._moncar = value
@property
def hcol(self):
r""" The horizontal Soller collimations in minutes of arc (FWHM beam
divergence) starting from the in-pile collimator. In case of a
horizontally-focusing analyzer ``hcol[2]`` is the angular size of the
analyzer, as seen from the sample position. If the beam divergence is
limited by a neutron guide, the corresponding element of :attr:`hcol`
is the negative of the guide’s *m*-value. For example, for a 58-Ni
guide ( *m* = 1.2 ) before the monochromator, ``hcol[0]`` should be
-1.2.
"""
return self._hcol
@hcol.setter
def hcol(self, value):
self._hcol = value
@property
def vcol(self):
"""The vertical Soller collimations in minutes of arc (FWHM beam
divergence) starting from the in-pile collimator. If the beam
divergence is limited by a neutron guide, the corresponding element of
:attr:`vcol` is the negative of the guide’s *m*-value. For example, for
a 58-Ni guide ( *m* = 1.2 ) before the monochromator, ``vcol[0]``
should be -1.2.
"""
return self._vcol
@vcol.setter
def vcol(self, value):
self._vcol = value
@property
def arms(self):
"""distances between the source and monochromator, monochromator
and sample, sample and analyzer, analyzer and detector, and
monochromator and monitor, respectively. The 5th element is only needed
if ``moncor=1``
"""
return self._arms
@arms.setter
def arms(self, value):
self._arms = value
@property
def efixed(self):
"""the fixed incident or final neutron energy, in meV.
"""
return self._efixed
@efixed.setter
def efixed(self, value):
self._efixed = value
@property
def sample(self):
"""A structure that describes the sample.
Attributes
----------
mosaic
FWHM sample mosaic in the scattering plane
in minutes of arc. If left unassigned, no sample
mosaic corrections (section II E) are performed.
vmosaic
The vertical sample mosaic in minutes of arc.
If left unassigned, isotropic mosaic is assumed.
dir
The direction of the crystal (left or right, -1 or +1,
respectively). Default: -1 (left-handed coordinate frame).
"""
return self._sample
@sample.setter
def sample(self, value):
self._sample = value
@property
def orient1(self):
"""Miller indexes of the first reciprocal-space orienting vector for
the S coordinate system, as explained in Section II G.
"""
return self._sample.u
@orient1.setter
def orient1(self, value):
self._sample.u = np.array(value)
@property
def orient2(self):
"""Miller indexes of the second reciprocal-space orienting vector
for the S coordinate system, as explained in Section II G.
"""
return self._sample.v
@orient2.setter
def orient2(self, value):
self._sample.v = np.array(value)
@property
def infin(self):
"""a flag set to -1 or left unassigned if the final energy is fixed, or
set to +1 in a fixed-incident setup.
"""
return self._infin
@infin.setter
def infin(self, value):
self._infin = value
@property
def guide(self):
r"""A structure that describes the source
"""
return self._guide
@guide.setter
def guide(self, value):
self._guide = value
@property
def detector(self):
"""A structure that describes the detector
"""
return self._detector
@detector.setter
def detector(self, value):
self._detector = value
@property
def monitor(self):
"""A structure that describes the monitor
"""
return self._monitor
@monitor.setter
def monitor(self, value):
self._monitor = value
@property
def Smooth(self):
u"""Defines the smoothing parameters as explained in Section II H. Leave this
field unassigned if you don’t want this correction done.
* ``Smooth.E`` is the smoothing FWHM in energy (meV). A small number
means “no smoothing along this direction”.
* ``Smooth.X`` is the smoothing FWHM along the first orienting vector
(x0 axis) in Å\ :sup:`-1`.
* ``Smooth.Y`` is the smoothing FWHM along the y axis in Å\ :sup:`-1`.
* ``Smooth.Z`` is the smoothing FWHM along the vertical direction in
Å\ :sup:`-1`.
"""
return self._Smooth
@Smooth.setter
def Smooth(self, value):
self._Smooth = value
def get_lattice(self):
r"""Extracts lattice parameters from EXP and returns the direct and
reciprocal lattice parameters in the form used by _scalar.m, _star.m,
etc.
Returns
-------
[lattice, rlattice] : [class, class]
Returns the direct and reciprocal lattice sample classes
Notes
-----
Translated from ResLib 3.4c, originally authored by <NAME>,
1999-2007, Oak Ridge National Laboratory
"""
lattice = Sample(self.sample.a,
self.sample.b,
self.sample.c,
np.deg2rad(self.sample.alpha),
np.deg2rad(self.sample.beta),
np.deg2rad(self.sample.gamma))
rlattice = _star(lattice)[-1]
return [lattice, rlattice]
def _StandardSystem(self):
r"""Returns rotation matrices to calculate resolution in the sample view
instead of the instrument view
Attributes
----------
EXP : class
Instrument class
Returns
-------
[x, y, z, lattice, rlattice] : [array, array, array, class, class]
Returns the rotation matrices and real and reciprocal lattice
sample classes
Notes
-----
Translated from ResLib 3.4c, originally authored by <NAME>,
1999-2007, Oak Ridge National Laboratory
"""
[lattice, rlattice] = self.get_lattice()
orient1 = self.orient1
orient2 = self.orient2
modx = _modvec(orient1, rlattice)
x = orient1 / modx
proj = _scalar(orient2, x, rlattice)
y = orient2 - x * proj
mody = _modvec(y, rlattice)
if len(np.where(mody <= 0)[0]) > 0:
raise ScatteringTriangleError('Orienting vectors are colinear')
y /= mody
z = np.array([ x[1] * y[2] - y[1] * x[2],
x[2] * y[0] - y[2] * x[0],
-x[1] * y[0] + y[1] * x[0]], dtype=np.float64)
proj = _scalar(z, x, rlattice)
z -= x * proj
proj = _scalar(z, y, rlattice)
z -= y * proj
modz = _modvec(z, rlattice)
z /= modz
return [x, y, z, lattice, rlattice]
def calc_resolution_in_Q_coords(self, Q, W):
r"""For a momentum transfer Q and energy transfers W, given experimental
conditions specified in EXP, calculates the Cooper-Nathans or Popovici
resolution matrix RM and resolution prefactor R0 in the Q coordinate
system (defined by the scattering vector and the scattering plane).
Parameters
----------
Q : ndarray or list of ndarray
The Q vectors in reciprocal space at which resolution should be
calculated, in inverse angstroms
W : float or list of floats
The energy transfers at which resolution should be calculated in meV
Returns
-------
[R0, RM] : list(float, ndarray)
Resolution pre-factor (R0) and resolution matrix (RM) at the given
reciprocal lattice vectors and energy transfers
Notes
-----
Translated from ResLib 3.4c, originally authored by <NAME>,
1999-2007, Oak Ridge National Laboratory
"""
CONVERT1 = np.pi / 60. / 180. / np.sqrt(8 * np.log(2))
CONVERT2 = 2.072
[length, Q, W] = _CleanArgs(Q, W)
RM = np.zeros((length, 4, 4), dtype=np.float64)
R0 = np.zeros(length, dtype=np.float64)
RM_ = np.zeros((4, 4), dtype=np.float64)
# the method to use
method = 0
if hasattr(self, 'method'):
method = self.method
# Assign default values and decode parameters
moncor = 1
if hasattr(self, 'moncor'):
moncor = self.moncor
alpha = np.array(self.hcol) * CONVERT1
beta = np.array(self.vcol) * CONVERT1
mono = self.mono
etam = np.array(mono.mosaic) * CONVERT1
etamv = np.copy(etam)
if hasattr(mono, 'vmosaic') and (method == 1 or method == 'Popovici'):
etamv = np.array(mono.vmosaic) * CONVERT1
ana = self.ana
etaa = np.array(ana.mosaic) * CONVERT1
etaav = np.copy(etaa)
if hasattr(ana, 'vmosaic'):
etaav = np.array(ana.vmosaic) * CONVERT1
sample = self.sample
infin = -1
if hasattr(self, 'infin'):
infin = self.infin
efixed = self.efixed
monitorw = 1.
monitorh = 1.
beamw = 1.
beamh = 1.
monow = 1.
monoh = 1.
monod = 1.
anaw = 1.
anah = 1.
anad = 1.
detectorw = 1.
detectorh = 1.
sshapes = np.repeat(np.eye(3, dtype=np.float64)[np.newaxis].reshape((1, 3, 3)), length, axis=0)
sshape_factor = 12.
L0 = 1.
L1 = 1.
L1mon = 1.
L2 = 1.
L3 = 1.
monorv = 1.e6
monorh = 1.e6
anarv = 1.e6
anarh = 1.e6
if hasattr(self, 'guide'):
beam = self.guide
if hasattr(beam, 'width'):
beamw = beam.width ** 2 / 12.
if hasattr(beam, 'height'):
beamh = beam.height ** 2 / 12.
bshape = np.diag([beamw, beamh])
if hasattr(self, 'monitor'):
monitor = self.monitor
if hasattr(monitor, 'width'):
monitorw = monitor.width ** 2 / 12.
monitorh = monitorw
if hasattr(monitor, 'height'):
monitorh = monitor.height ** 2 / 12.
monitorshape = np.diag([monitorw, monitorh])
if hasattr(self, 'detector'):
detector = self.detector
if hasattr(detector, 'width'):
detectorw = detector.width ** 2 / 12.
if hasattr(detector, 'height'):
detectorh = detector.height ** 2 / 12.
dshape = np.diag([detectorw, detectorh])
if hasattr(mono, 'width'):
monow = mono.width ** 2 / 12.
if hasattr(mono, 'height'):
monoh = mono.height ** 2 / 12.
if hasattr(mono, 'depth'):
monod = mono.depth ** 2 / 12.
mshape = np.diag([monod, monow, monoh])
if hasattr(ana, 'width'):
anaw = ana.width ** 2 / 12.
if hasattr(ana, 'height'):
anah = ana.height ** 2 / 12.
if hasattr(ana, 'depth'):
anad = ana.depth ** 2 / 12.
ashape = np.diag([anad, anaw, anah])
if hasattr(sample, 'shape_type'):
if sample.shape_type == 'cylindrical':
sshape_factor = 16.
elif sample.shape_type == 'rectangular':
sshape_factor = 12.
if hasattr(sample, 'width') and hasattr(sample, 'depth') and hasattr(sample, 'height'):
_sshape = np.diag([sample.depth, sample.width, sample.height]).astype(np.float64) ** 2 / sshape_factor
sshapes = np.repeat(_sshape[np.newaxis].reshape((1, 3, 3)), length, axis=0)
elif hasattr(sample, 'shape'):
_sshape = sample.shape.astype(np.float64) / sshape_factor
if len(_sshape.shape) == 2:
sshapes = np.repeat(_sshape[np.newaxis].reshape((1, 3, 3)), length, axis=0)
else:
sshapes = _sshape
if hasattr(self, 'arms') and method == 1:
arms = self.arms
L0, L1, L2, L3 = arms[:4]
L1mon = np.copy(L1)
if len(arms) > 4:
L1mon = np.copy(arms[4])
if hasattr(mono, 'rv'):
monorv = mono.rv
if hasattr(mono, 'rh'):
monorh = mono.rh
if hasattr(ana, 'rv'):
anarv = ana.rv
if hasattr(ana, 'rh'):
anarh = ana.rh
taum = GetTau(mono.tau)
taua = GetTau(ana.tau)
horifoc = -1
if hasattr(self, 'horifoc'):
horifoc = self.horifoc
if horifoc == 1:
alpha[2] = alpha[2] * np.sqrt(8. * np.log(2.) / 12.)
sm = self.mono.dir
ss = self.sample.dir
sa = self.ana.dir
for ind in range(length):
sshape = sshapes[ind, :, :]
# Calculate angles and energies
w = W[ind]
q = Q[ind]
ei = efixed
ef = efixed
if infin > 0:
ef = efixed - w
else:
ei = efixed + w
ki = np.sqrt(ei / CONVERT2)
kf = np.sqrt(ef / CONVERT2)
thetam = np.arcsin(taum / (2. * ki)) * sm
thetaa = np.arcsin(taua / (2. * kf)) * sa
s2theta = np.arccos(complex((ki ** 2 + kf ** 2 - q ** 2) / (2. * ki * kf))) * ss  # complex() replaces the np.complex alias removed in newer NumPy
if np.abs(np.imag(s2theta)) > 1e-12:
raise ScatteringTriangleError(
'KI,KF,Q triangle will not close. Change the value of KFIX,FX,QH,QK or QL.')
else:
s2theta = np.real(s2theta)
# correct sign of curvatures
monorh = monorh * sm
monorv = monorv * sm
anarh = anarh * sa
anarv = anarv * sa
thetas = s2theta / 2.
phi = np.arctan2(-kf * np.sin(s2theta), ki - kf * np.cos(s2theta))
# Calculate beam divergences defined by neutron guides
alpha[alpha < 0] = -alpha[alpha < 0] * 0.1 * 60. * (2. * np.pi / ki) / 0.427 / np.sqrt(3.)
beta[beta < 0] = -beta[beta < 0] * 0.1 * 60. * (2. * np.pi / ki) / 0.427 / np.sqrt(3.)
# Redefine sample geometry
psi = thetas - phi # Angle from sample geometry X axis to Q
rot = np.matrix([[np.cos(psi), np.sin(psi), 0],
[-np.sin(psi), np.cos(psi), 0],
[0, 0, 1]], dtype=np.float64)
# sshape=rot'*sshape*rot
sshape = np.matrix(rot) * np.matrix(sshape) * np.matrix(rot).H
# Definition of matrix G
G = np.matrix(
np.diag(1. / np.array([alpha[:2], beta[:2], alpha[2:], beta[2:]], dtype=np.float64).flatten() ** 2))
# Definition of matrix F
F = np.matrix(np.diag(1. / np.array([etam, etamv, etaa, etaav], dtype=np.float64) ** 2))
# Definition of matrix A
A = np.matrix([[ki / 2. / np.tan(thetam), -ki / 2. / np.tan(thetam), 0, 0, 0, 0, 0, 0],
[0, ki, 0, 0, 0, 0, 0, 0],
[0, 0, 0, ki, 0, 0, 0, 0],
[0, 0, 0, 0, kf / 2. / np.tan(thetaa), -kf / 2. / np.tan(thetaa), 0, 0],
[0, 0, 0, 0, kf, 0, 0, 0],
[0, 0, 0, 0, 0, 0, kf, 0]], dtype=np.float64)
# Definition of matrix C
C = np.matrix([[0.5, 0.5, 0, 0, 0, 0, 0, 0],
[0., 0., 1. / (2. * np.sin(thetam)), -1. / (2. * np.sin(thetam)), 0, 0, 0, 0],
[0, 0, 0, 0, 0.5, 0.5, 0, 0],
[0, 0, 0, 0, 0, 0, 1. / (2. * np.sin(thetaa)), -1. / (2. * np.sin(thetaa))]],
dtype=np.float64)
# Definition of matrix Bmatrix
Bmatrix = np.matrix([[np.cos(phi), np.sin(phi), 0, -np.cos(phi - s2theta), -np.sin(phi - s2theta), 0],
[-np.sin(phi), np.cos(phi), 0, np.sin(phi - s2theta), -np.cos(phi - s2theta), 0],
[0, 0, 1, 0, 0, -1],
[2. * CONVERT2 * ki, 0, 0, -2. * CONVERT2 * kf, 0, 0]], dtype=np.float64)
# Definition of matrix S
Sinv = np.matrix(blkdiag(np.array(bshape, dtype=np.float64), mshape, sshape, ashape, dshape)) # S-1 matrix
S = Sinv.I
# Definition of matrix T
T = np.matrix([[-1. / (2. * L0), 0, np.cos(thetam) * (1. / L1 - 1. / L0) / 2.,
np.sin(thetam) * (1. / L0 + 1. / L1 - 2. / (monorh * np.sin(thetam))) / 2., 0,
np.sin(thetas) / (2. * L1), np.cos(thetas) / (2. * L1), 0, 0, 0, 0, 0, 0],
[0, -1. / (2. * L0 * np.sin(thetam)), 0, 0,
(1. / L0 + 1. / L1 - 2. * np.sin(thetam) / monorv) / (2. * np.sin(thetam)), 0, 0,
-1. / (2. * L1 * np.sin(thetam)), 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, np.sin(thetas) / (2. * L2), -np.cos(thetas) / (2. * L2), 0,
np.cos(thetaa) * (1. / L3 - 1. / L2) / 2.,
np.sin(thetaa) * (1. / L2 + 1. / L3 - 2. / (anarh * np.sin(thetaa))) / 2., 0,
1. / (2. * L3), 0],
[0, 0, 0, 0, 0, 0, 0, -1. / (2. * L2 * np.sin(thetaa)), 0, 0,
(1. / L2 + 1. / L3 - 2. * np.sin(thetaa) / anarv) / (2. * np.sin(thetaa)), 0,
-1. / (2. * L3 * np.sin(thetaa))]], dtype=np.float64)
# Definition of matrix D
# Lots of index mistakes in paper for matrix D
D = np.matrix([[-1. / L0, 0, -np.cos(thetam) / L0, np.sin(thetam) / L0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, np.cos(thetam) / L1, np.sin(thetam) / L1, 0, np.sin(thetas) / L1, np.cos(thetas) / L1,
0, 0, 0, 0, 0, 0],
[0, -1. / L0, 0, 0, 1. / L0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, -1. / L1, 0, 0, 1. / L1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, np.sin(thetas) / L2, -np.cos(thetas) / L2, 0, -np.cos(thetaa) / L2,
np.sin(thetaa) / L2, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, np.cos(thetaa) / L3, np.sin(thetaa) / L3, 0, 1. / L3, 0],
[0, 0, 0, 0, 0, 0, 0, -1. / L2, 0, 0, 1. / L2, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1. / L3, 0, 1. / L3]], dtype=np.float64)
# Definition of resolution matrix M
if method == 1 or method == 'popovici':
K = S + T.H * F * T
H = np.linalg.inv(D * np.linalg.inv(K) * D.H)
Ninv = A * np.linalg.inv(H + G) * A.H
else:
H = G + C.H * F * C
Ninv = A * np.linalg.inv(H) * A.H
# Horizontally focusing analyzer if needed
if horifoc > 0:
Ninv = np.linalg.inv(Ninv)
Ninv[3:5, 3:5] = np.matrix([[(np.tan(thetaa) / (etaa * kf)) ** 2, 0],
[0, (1 / (kf * alpha[2])) ** 2]], dtype=np.float64)
Ninv = np.linalg.inv(Ninv)
Minv = Bmatrix * Ninv * Bmatrix.H
M = np.linalg.inv(Minv)
RM_ = np.copy(M)
# Calculation of prefactor, normalized to source
Rm = ki ** 3 / np.tan(thetam)
Ra = kf ** 3 / np.tan(thetaa)
R0_ = Rm * Ra * (2. * np.pi) ** 4 / (64. * np.pi ** 2 * np.sin(thetam) * np.sin(thetaa))
if method == 1 or method == 'popovici':
# Popovici
R0_ = R0_ * np.sqrt(np.linalg.det(F) / np.linalg.det(H + G))
else:
# Cooper-Nathans (popovici Eq 5 and 9)
R0_ = R0_ * np.sqrt(np.linalg.det(F) / np.linalg.det(H))
# Normalization to flux on monitor
if moncor == 1:
g = G[:4, :4]
f = F[:2, :2]
c = C[:2, :4]
t = np.matrix([[-1. / (2. * L0), 0, np.cos(thetam) * (1. / L1mon - 1. / L0) / 2.,
np.sin(thetam) * (1. / L0 + 1. / L1mon - 2. / (monorh * np.sin(thetam))) / 2., 0, 0,
1. / (2. * L1mon)],
[0, -1. / (2. * L0 * np.sin(thetam)), 0, 0,
(1. / L0 + 1. / L1mon - 2. * np.sin(thetam) / monorv) / (2. * np.sin(thetam)), 0, 0]],
dtype=np.float64)
sinv = blkdiag(np.array(bshape, dtype=np.float64), mshape, monitorshape) # S-1 matrix
s = np.linalg.inv(sinv)
import argparse
import os
from ast import literal_eval
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
import pommerman
from evaluation import constants
from pommerman.dqn import utils
home_directory = os.path.expanduser('~')
result_path = os.path.join(home_directory, 'dev/playground/battle_csv')
def calculate_success_rate():
for setting in constants.GameSetUp:
dict_name = utils.get_path_from_setting(setting.value, result_path)
csv_path = os.path.join(dict_name, 'results.csv')
if not os.path.exists(csv_path):
continue
print(csv_path)
data = pd.read_csv(csv_path)
# Convert reward string to array
# Convert agents tuple to array
data["Rewards"] = data["Rewards"].apply(literal_eval)
data["Agents"] = data["Agents"].apply(literal_eval)
# First index of Rewards is result of first team, second index of Rewards is result of second team
data[str(setting.value[0])] = data.Rewards.map(lambda x: x[0] if x[0] == 1 else 0)
data[str(setting.value[1])] = data.Rewards.map(lambda x: x[1] if x[1] == 1 else 0)
# Add total of wins
data.at["Total_wins", str(setting.value[0])] = data[str(setting.value[0])].sum()
data.at["Total_wins", str(setting.value[1])] = data[str(setting.value[1])].sum()
# Calculate success rate
data.at["Success_rate", str(setting.value[0])] = (data.at["Total_wins",
str(setting.value[0])] / constants.run_battle_num_times) * 100
data.at["Success_rate", str(setting.value[1])] = (data.at["Total_wins",
str(setting.value[1])] / constants.run_battle_num_times) * 100
final_path = csv_path.replace('.', '_copy.')
print(final_path)
data.to_csv(final_path, index=True, mode='w')
def do_qualitative_analysis():
battles_with_comm_xp = [setting for setting in constants.GameSetUp if 'xp::comm' in setting.value]
for setting in battles_with_comm_xp:
dict_name = utils.get_path_from_setting(setting.value, result_path)
csv_path = os.path.join(dict_name, 'state_message_pairs.csv')
if not os.path.exists(csv_path):
continue
print(csv_path)
data = pd.read_csv(csv_path, header=None, names=["Episode", "Scenario", "Message"])
final_data = pd.DataFrame(columns=["Episode", "Scenario", "Message", "Frequency"])
for episode in range(0, constants.run_battle_num_times):
df = data[data.Episode == episode]
for scenario in constants.Scenario:
tmp_df = df[df.Scenario == scenario.value]
frequency = np.zeros(pommerman.constants.RADIO_VOCAB_SIZE + 1, dtype=int)  # histogram of message tokens; np.arange would pre-fill nonzero counts
for _, row in tmp_df.iterrows():
frequency[row["Message"]] += 1
for index, _frequency in enumerate(frequency):
final_data.loc[len(final_data)] = [episode, scenario, index, _frequency]
# Add std
g = sb.catplot(data=final_data, x='Message', y='Frequency', col='Scenario', kind='bar', ci=None,
sharex=False, sharey=False)
fig_name = 'state_message_dist.svg'
setting_directory = utils.get_path_from_setting(setting.value, '/home/patricia/GoogleDrive/BA/latex/text/pictures')
if not os.path.exists(setting_directory):
os.mkdir(setting_directory)
fig_path = os.path.join(setting_directory, fig_name)
# plt.savefig(fig_path, format='svg')
plt.show()
def analyse_message_action_co_occurrence(prefix, metric_name):
battles_with_comm_xp = [setting for setting in constants.GameSetUp if 'xp::comm' in setting.value]
for setting in battles_with_comm_xp:
dict_name = utils.get_path_from_setting(setting.value, result_path)
csv_path = os.path.join(dict_name, prefix + '_message_action_pairs.csv')
if not os.path.exists(csv_path):
continue
print(csv_path)
data = pd.read_csv(csv_path, header=None, names=["Episode", "Message", "Action"])
data_copy = data.copy()
for episode in range(0, constants.run_battle_num_times):
df = data[data.Episode == episode]
print(df)
co_occurrence = np.zeros([pommerman.constants.RADIO_VOCAB_SIZE + 1, len(pommerman.constants.Action)])
for index, row in df.iterrows():
co_occurrence[row["Message"], row["Action"]] += 1
p_am = co_occurrence / np.sum(co_occurrence)
p_a = np.sum(co_occurrence, axis=0) / np.sum(co_occurrence)
p_m = np.sum(co_occurrence, axis=1) / np.sum(co_occurrence)  # marginal over messages (normalisation assumed, by analogy with p_a)
"""The beam search module."""
from collections import OrderedDict
from six.moves import range
import logging
import numpy
from picklable_itertools.extras import equizip
import theano
from theano import function
from blocks_extras.bricks.sequence_generator2 import SequenceGenerator
from blocks.filter import VariableFilter, get_application_call, get_brick
from blocks.graph import ComputationGraph
from blocks.roles import INPUT, OUTPUT
logger = logging.getLogger(__name__)
class CandidateNotFoundError(Exception):
pass
class BeamSearch(object):
"""Approximate search for the most likely sequence.
Beam search is an approximate algorithm for finding :math:`y^* =
argmax_y P(y|c)`, where :math:`y` is an output sequence, :math:`c` are
the contexts, :math:`P` is the output distribution of a
:class:`.SequenceGenerator`. At each step it considers :math:`k`
candidate sequence prefixes. :math:`k` is called the beam size, and the
sequence are called the beam. The sequences are replaced with their
:math:`k` most probable continuations, and this is repeated until
end-of-line symbol is met.
The beam search compiles quite a few Theano functions under the hood.
Normally those are compiled at the first :meth:`search` call, but
you can also explicitly call :meth:`compile`.
Parameters
----------
beam_size : int
The beam size.
samples : :class:`~theano.Variable`
An output of a sampling computation graph built by
:meth:`~blocks.brick.SequenceGenerator.generate`, the one
corresponding to sampled sequences.
See Also
--------
:class:`.SequenceGenerator`
Notes
-----
Sequence generator should use an emitter which has `probs` method
e.g. :class:`SoftmaxEmitter`.
Does not support dummy contexts so far (all the contexts must be used
in the `generate` method of the sequence generator for the current code
to work).
"""
def __init__(self, beam_size, samples):
self.beam_size = beam_size
# Extracting information from the sampling computation graph
cg = ComputationGraph(samples)
self.inputs = cg.inputs
self.generator = get_brick(samples)
if not isinstance(self.generator, SequenceGenerator):
raise ValueError
self.generate_call = get_application_call(samples)
if (not self.generate_call.application ==
self.generator.generate):
raise ValueError
self.inner_cg = ComputationGraph(self.generate_call.inner_outputs)
# Fetching names from the sequence generator
self.context_names = self.generator.generate.contexts
self.state_names = self.generator.generate.states
# Parsing the inner computation graph of sampling scan
self.contexts = [
VariableFilter(bricks=[self.generator],
name=name,
roles=[INPUT])(self.inner_cg)[0]
for name in self.context_names]
self.input_states = []
# Includes only those state names that were actually used
# in 'generate'
self.input_state_names = []
for name in self.generator.generate.states:
var = VariableFilter(
bricks=[self.generator], name=name,
roles=[INPUT])(self.inner_cg)
if var:
self.input_state_names.append(name)
self.input_states.append(var[0])
self.compiled = False
def _compile_context_computer(self):
self.context_computer = function(
self.inputs, self.contexts, on_unused_input='ignore')
def _compile_initial_state_computer(self):
# TODO: should be now extractable from the computation graph
initial_states = self.generator.initial_states(
1, as_dict=True,
**dict(equizip(self.context_names, self.contexts)))
self.initial_state_computer = function(
self.contexts, initial_states, on_unused_input='ignore')
def _compile_next_state_computer(self):
next_states = [VariableFilter(bricks=[self.generator],
name=name,
roles=[OUTPUT])(self.inner_cg)[-1]
for name in self.state_names]
next_outputs = VariableFilter(
applications=[self.generator.readout.sample],
name='samples')(self.inner_cg.variables)
self.next_state_computer = function(
self.contexts + self.input_states + next_outputs, next_states,
# This is temporarily required because `lm_logprobs` is a weird
# state which is not used to compute next state, but used to
# compute the next output.
on_unused_input='ignore')
def _compile_logprobs_computer(self):
# This filtering should return identical variables
# (in terms of computations) variables, and we do not care
# which to use.
scores = self.generator.readout.scores(
**dict(zip(self.context_names, self.contexts) +
zip(self.input_state_names, self.input_states)))
self.logprobs_computer = function(
self.contexts + self.input_states, -scores,
on_unused_input='ignore')
def compile(self):
"""Compile all Theano functions used."""
self._compile_context_computer()
self._compile_initial_state_computer()
self._compile_next_state_computer()
self._compile_logprobs_computer()
self.compiled = True
def compute_contexts(self, inputs):
"""Computes contexts from inputs.
Parameters
----------
inputs : dict
Dictionary of input arrays.
Returns
-------
A {name: :class:`numpy.ndarray`} dictionary of contexts ordered
like `self.context_names`.
"""
contexts = self.context_computer(*[inputs[var]
for var in self.inputs])
return OrderedDict(equizip(self.context_names, contexts))
def compute_initial_states(self, contexts):
"""Computes initial states.
Parameters
----------
contexts : dict
A {name: :class:`numpy.ndarray`} dictionary of contexts.
Returns
-------
A {name: :class:`numpy.ndarray`} dictionary of states ordered like
`self.state_names`.
"""
return self.initial_state_computer(*list(contexts.values()))
def compute_logprobs(self, contexts, states):
"""Compute log probabilities of all possible outputs.
Parameters
----------
contexts : dict
A {name: :class:`numpy.ndarray`} dictionary of contexts.
states : dict
A {name: :class:`numpy.ndarray`} dictionary of states.
Returns
-------
A :class:`numpy.ndarray` of the (beam size, number of possible
outputs) shape.
"""
input_states = [states[name] for name in self.input_state_names]
return self.logprobs_computer(*(list(contexts.values()) +
input_states))
def compute_next_states(self, contexts, states, outputs):
"""Computes next states.
Parameters
----------
contexts : dict
A {name: :class:`numpy.ndarray`} dictionary of contexts.
states : dict
A {name: :class:`numpy.ndarray`} dictionary of states.
outputs : :class:`numpy.ndarray`
A :class:`numpy.ndarray` of this step outputs.
Returns
-------
A {name: numpy.array} dictionary of next states.
"""
input_states = [states[name] for name in self.input_state_names]
next_values = self.next_state_computer(*(list(contexts.values()) +
input_states + [outputs]))
return OrderedDict(equizip(self.state_names, next_values))
@staticmethod
def _smallest(matrix, k):
"""Find k smallest elements of a matrix.
Parameters
----------
matrix : :class:`numpy.ndarray`
The matrix.
k : int
The number of smallest elements required.
Returns
-------
Tuple of ((row numbers, column numbers), values).
"""
flatten = matrix.flatten()
if flatten.shape[0] > k:
args = numpy.argpartition(flatten, k)[:k]
else:
args = numpy.arange(flatten.shape[0])
args = args[numpy.argsort(flatten[args])]
return numpy.unravel_index(args, matrix.shape), flatten[args]
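# For example, _smallest(numpy.array([[3., 1.], [0., 2.]]), 2) returns
# ((array([1, 0]), array([0, 1])), array([0., 1.])): the two smallest entries
# are at (1, 0) and (0, 1), with values 0 and 1 in increasing order.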
def search(self, input_values, eol_symbol, max_length,
ignore_first_eol=False, as_arrays=False,
char_discount=0, round_to_inf=1e9,
stop_on='patience', consider_all_eos=False,
validate_solution_function=None):
"""Performs beam search.
If the beam search was not compiled, it also compiles it.
Parameters
----------
input_values : dict
A {:class:`~theano.Variable`: :class:`~numpy.ndarray`}
dictionary of input values. The shapes should be
the same as if you ran sampling with batch size equal to
`beam_size`. Put differently, the user is responsible
for duplicating the inputs the necessary number of times, because
this class has insufficient information to do it properly.
eol_symbol : int
End of sequence symbol, the search stops when the symbol is
generated.
max_length : int
Maximum sequence length, the search stops when it is reached.
ignore_first_eol : bool, optional
When ``True``, the end-of-sequence symbol generated at the
first iteration is ignored. This is useful when the sequence
generator was trained on data with identical symbols for
sequence start and sequence end.
as_arrays : bool, optional
If ``True``, the internal representation of search results
is returned, that is a (matrix of outputs, mask,
costs of all generated outputs) tuple.
Returns
-------
outputs : list of lists of ints
A list of the `beam_size` best sequences found in the order
of decreasing likelihood.
costs : list of floats
A list of the costs for the `outputs`, where cost is the
negative log-likelihood.
"""
if validate_solution_function:
raise ValueError
if not self.compiled:
self.compile()
contexts = self.compute_contexts(input_values)
large_contexts = OrderedDict(contexts)
states = self.compute_initial_states(contexts)
# This array will store all generated outputs, including those from
# previous step and those from already finished sequences.
all_outputs = -1 * numpy.ones((1, 1), dtype='int64')
all_costs = numpy.zeros_like(all_outputs, dtype=theano.config.floatX)
done = []
min_cost = 1000
for i in range(max_length):
if len(states.values()[0].flatten()) == 0:
break
if stop_on == 'patience':
done = sorted(done, key=lambda x: x[1][-1] - char_discount * len(x[1]))
done = done[:self.beam_size]
if done:
current_best_cost = done[0][1][-1] - char_discount * len(done[0][1])
if current_best_cost < min_cost:
min_cost = current_best_cost
patience = 30
else:
patience -= 1
if patience == 0:
break
elif stop_on == 'optimistic_future_cost':
# stop only when we have at least self.beam_size sequences,
# that are all cheaper than we can possibly obtain by extending
# other ones
if (len(done) >= self.beam_size):
optimistic_future_cost = (all_costs[-1, :].min() -
char_discount * max_length)
last_in_done = done[self.beam_size - 1][1]
# note: done is sorted by the cost with char discount subtracted
last_in_done_cost = (last_in_done[-1] -
char_discount * len(last_in_done))
if last_in_done_cost < optimistic_future_cost:
break
else:
raise ValueError('Unknown stopping criterion {}'.format(stop_on))
# Broadcasting of contexts, should happen only once
if large_contexts.values()[0].shape[1] != states.values()[0].shape[0]:
# logger.debug("Reshape contexts 1")
for name, ctx in contexts.items():
large_contexts[name] = numpy.take(ctx, [0]*states.values()[0].shape[0], axis=1)
logprobs = self.compute_logprobs(large_contexts, states)
assert numpy.isfinite(logprobs).all()
next_costs = (all_costs[-1, :, None] + logprobs)
if consider_all_eos:
for idx in range(self.beam_size):
candidate = numpy.concatenate(
[all_outputs[:, idx], [eol_symbol]])
costs = numpy.concatenate([all_costs[:, idx],
[next_costs[idx, eol_symbol]]])
done.append((candidate, costs))
(indices, outputs), chosen_costs = self._smallest(
next_costs, self.beam_size)
# Rearrange everything
for name in states:
states[name] = numpy.take(states[name], indices, axis=0)
all_outputs = numpy.take(all_outputs, indices, axis=1)
all_costs = numpy.take(all_costs, indices, axis=1)
if large_contexts.values()[0].shape[1] != states.values()[0].shape[0]:
# logger.debug('Reshape contexts 2')
for name, ctx in contexts.items():
large_contexts[name] = numpy.take(ctx, [0]*states.values()[0].shape[0], axis=1)
states = self.compute_next_states(large_contexts, states, outputs)
all_outputs = numpy.vstack([all_outputs, outputs[None, :]])
all_costs = numpy.vstack([all_costs, chosen_costs[None, :]])
mask = outputs != eol_symbol
if ignore_first_eol and i == 0:
mask[:] = 1
for idx in numpy.where(
(all_outputs[-1] == eol_symbol) &
(all_costs[-1] - all_costs[-2] < round_to_inf))[0]:
if (validate_solution_function is None or
validate_solution_function(input_values,
all_outputs[:, idx])):
done.append((all_outputs[:, idx], all_costs[:, idx]))
unfinished = numpy.where(mask == 1)[0]
for name in states:
states[name] = numpy.take(states[name], unfinished, axis=0)
import logging
import random
import urllib
from enum import Enum, auto
from typing import Any, List
from zipfile import ZipFile
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from diskcache import Cache
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel
from ca.difficulty.difficulty_estimators import (
BertDifficultyEstimator, FleschKincaidDifficultyEstimator,
MaskedLanguageModelScorer, UncertaintyDifficultyEstimator)
from ca.featurizer import CachedSentenceTransformer, SentenceFeature
from ca.models.neuralnets.lstm_tagger import LSTMTagger
from ca.models.neuralnets.mlp_classifier import MLPClassifier
from ca.paths import PATH_CACHE, PATH_CACHE_GLOVE
class StrategyType(Enum):
RANDOM = auto()
SENTENCE_LENGTH = auto()
FLESCH_KINCAID_GRADE_LEVEL = auto()
FLESCH_KINCAID_READING_EASE = auto()
BERT_PREDICTION_DIFFICULTY = auto()
MASKED_LANGUAGE_MODEL_SCORE = auto()
ANNOTATION_TIME = auto()
MODEL_DIFFICULTY = auto()
class Strategy:
def __init__(self):
self._episode = 0
def init_tagging(
self,
X_so_far: List[List[List[str]]],
Xf_so_far: List[Any],
y_so_far: List[List[List[str]]],
X_unchosen: List[List[List[str]]],
Xf_unchosen: List[Any],
X_eval: List[List[str]],
Xf_eval: List[Any],
):
pass
def init_document_classification(
self,
X_so_far: List[str],
Xf_so_far: List[np.ndarray],
y_so_far: List[List[str]],
X_unchosen: List[str],
Xf_unchosen: List[np.ndarray],
X_eval: List[str],
Xf_eval: List[np.ndarray],
):
pass
def init_pairwise(
self,
X_so_far: List[str],
Xf_so_far: List[np.ndarray],
y_so_far: List[List[str]],
X_unchosen: List[str],
Xf_unchosen: List[np.ndarray],
X_eval: List[str],
Xf_eval: List[np.ndarray],
):
pass
def select(self) -> int:
raise NotImplementedError()
def argsort_unchosen(self, chunk_size: int) -> List[int]:
raise NotImplementedError()
def argsort_eval(self) -> np.ndarray:
raise NotImplementedError()
@property
def name(self) -> str:
return self.__class__.__name__
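# The Strategy interface above is consumed by the surrounding selection loop:
# select() returns the index of the next item to take from the unchosen pool,
# argsort_unchosen() ranks the remaining pool (for the difficulty-based
# strategies, easiest first), and argsort_eval() ranks the evaluation items
# the same way.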
class RandomStrategy(Strategy):
def __init__(self):
super().__init__()
self._X_unchosen = None
self._Xf_unchosen = None
self._Xf_eval = None
def init_tagging(
self,
X_so_far: List[List[List[str]]],
Xf_so_far: List[Any],
y_so_far: List[List[List[str]]],
X_unchosen: List[List[List[str]]],
Xf_unchosen: List[Any],
X_eval: List[List[str]],
Xf_eval: List[Any],
):
self._X_unchosen = X_unchosen
self._Xf_unchosen = Xf_unchosen
self._Xf_eval = Xf_eval
def init_document_classification(
self,
X_so_far: List[str],
Xf_so_far: List[np.ndarray],
y_so_far: List[List[str]],
X_unchosen: List[str],
Xf_unchosen: List[np.ndarray],
X_eval: List[str],
Xf_eval: List[np.ndarray],
):
self._X_unchosen = X_unchosen
self._Xf_unchosen = Xf_unchosen
self._Xf_eval = Xf_eval
def init_pairwise(
self,
X_so_far: List[str],
Xf_so_far: List[np.ndarray],
y_so_far: List[List[str]],
X_unchosen: List[str],
Xf_unchosen: List[np.ndarray],
X_eval: List[str],
Xf_eval: List[np.ndarray],
):
self._X_unchosen = X_unchosen
self._Xf_unchosen = Xf_unchosen
self._Xf_eval = Xf_eval
def select(self) -> int:
return random.randrange(len(self._X_unchosen))
def argsort_unchosen(self, chunk_size: int) -> List[int]:
assert len(self._X_unchosen) == len(self._Xf_unchosen)
size = len(self._X_unchosen)
idx = list(range(size))
random.shuffle(idx)
return idx
def argsort_eval(self) -> np.ndarray:
size = len(self._Xf_eval)
idx = np.arange(size)
np.random.shuffle(idx)
return idx
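# Usage sketch (illustrative only; the driver loop and variable names below are
# hypothetical and not part of this module). A strategy is initialised with the
# chosen/unchosen pools and then queried either one item at a time via select()
# or for a full ranking of the remaining pool via argsort_unchosen():
#
#   strategy = RandomStrategy()
#   strategy.init_document_classification(
#       X_so_far, Xf_so_far, y_so_far, X_unchosen, Xf_unchosen, X_eval, Xf_eval
#   )
#   next_idx = strategy.select()            # index into X_unchosen
#   ranking = strategy.argsort_unchosen(8)  # ordering of the unchosen pool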
class FleschKincaidlReadabilityStrategy(Strategy):
def __init__(self, score: str):
super().__init__()
self._estimator = FleschKincaidDifficultyEstimator()
self._scores: List[float] = []
self._scores_eval: List[float] = []
self._ranks_eval: np.ndarray = None
self._X_unchosen: List[List[str]] = []
self._score = score
def init_tagging(
self,
X_so_far: List[List[List[str]]],
Xf_so_far: List[Any],
y_so_far: List[List[List[str]]],
X_unchosen: List[List[List[str]]],
Xf_unchosen: List[Any],
X_eval: List[List[str]],
Xf_eval: List[Any],
):
logging.info("Precomputing Flesch-Kincaid grade readability scores")
self._X_unchosen = X_unchosen
for document in X_unchosen:
text = " ".join(" ".join(sent) for sent in document)
score = self._compute_score(text)
self._scores.append(score)
for sentence in X_eval:
text = " ".join(sentence)
score = self._compute_score(text)
self._scores_eval.append(score)
self._ranks_eval = np.argsort(self._scores_eval)
def init_document_classification(
self,
X_so_far: List[str],
Xf_so_far: List[np.ndarray],
y_so_far: List[List[str]],
X_unchosen: List[str],
Xf_unchosen: List[np.ndarray],
X_eval: List[str],
Xf_eval: List[np.ndarray],
):
logging.info("Precomputing Flesch-Kincaid grade readability scores")
self._X_unchosen = X_unchosen
for sentence in X_unchosen:
score = self._compute_score(sentence)
self._scores.append(score)
for sentence in X_eval:
score = self._compute_score(sentence)
self._scores_eval.append(score)
self._ranks_eval = np.argsort(self._scores_eval)
def init_pairwise(
self,
X_so_far: List[str],
Xf_so_far: List[np.ndarray],
y_so_far: List[List[str]],
X_unchosen: List[str],
Xf_unchosen: List[np.ndarray],
X_eval: List[str],
Xf_eval: List[np.ndarray],
):
logging.info("Precomputing Flesch-Kincaid grade readability scores")
self._X_unchosen = X_unchosen
for sentence in X_unchosen:
score = self._compute_score(sentence)
self._scores.append(score)
for sentence in X_eval:
score = self._compute_score(sentence)
self._scores_eval.append(score)
self._ranks_eval = np.argsort(self._scores_eval)
def select(self) -> int:
# scores is array of size NUM_ITEMS (num docs or num sentences)
# We want to select the easiest one, that is the one with the lowest score
assert len(self._scores) == len(self._X_unchosen), f"{len(self._scores)} != {len(self._X_unchosen)}"
idx = int(np.argmin(self._scores))
self._scores.pop(idx)
return idx
def argsort_unchosen(self, chunk_size: int) -> List[int]:
# scores is array of size NUM_ITEMS (num docs or num sentences)
assert len(self._scores) == len(self._X_unchosen), f"{len(self._scores)} != {len(self._X_unchosen)}"
indices = np.argsort(self._scores)
"""Tests for Kronecker-type linear operators."""
import numpy as np
import pytest
import pytest_cases
import probnum as pn
@pytest_cases.parametrize_with_cases(
"linop,matrix",
cases=".test_linops_cases.kronecker_cases",
has_tag="symmetric_kronecker",
)
def test_symmetric_kronecker_commutative(
linop: pn.linops.SymmetricKronecker, matrix: np.ndarray
):
linop_commuted = pn.linops.SymmetricKronecker(linop.B, linop.A)
np.testing.assert_array_equal(linop.todense(), linop_commuted.todense())
np.testing.assert_almost_equal(linop_commuted.todense(), matrix)
@pytest.mark.parametrize(
"A,B", [(np.array([[5, 1], [1, 10]]), np.array([[-2, 0.1], [0.1, 8]]))]
)
def test_symmetric_kronecker_symmetric_factors(A, B):
"""Dense matrix from symmetric Kronecker product of two symmetric matrices must be
symmetric."""
linop = pn.linops.SymmetricKronecker(A, B)
linop_transpose = linop.T
linop_dense = linop.todense()
np.testing.assert_array_equal(linop_dense, linop_dense.T)
np.testing.assert_array_equal(linop_dense, linop_transpose.todense())
@pytest.mark.parametrize("n", [1, 2, 3, 5, 12])
def test_symmetrize(n):
rng = np.random.default_rng(42)
x = rng.uniform(size=n * n)
X = np.reshape(x, (n, n))
y = pn.linops.Symmetrize(n) @ x
np.testing.assert_array_equal(
y.reshape(n, n), 0.5 * (X + X.T), err_msg="Matrix not symmetric."
)
Z = rng.uniform(size=(9, 5))
W = pn.linops.Symmetrize(3) @ Z
np.testing.assert_array_equal(
W,
np.vstack([pn.linops.Symmetrize(3) @ col for col in Z.T]).T,
err_msg="Matrix columns were not symmetrized.",
)
np.testing.assert_array_equal(
np.shape(W),
np.shape(Z),
err_msg="Symmetrized matrix columns have the wrong shape.",
)
#!/usr/bin/env ipython
# Evaluation of models
#
import json
import pdb
import numpy as np
import pandas as pd
from eugenium_mmd import MMD_3_Sample_Test
from scipy.stats import ks_2samp
import mmd
from sklearn.svm import SVC
from sklearn.metrics import classification_report, precision_recall_fscore_support, accuracy_score, roc_auc_score, average_precision_score
from sklearn.ensemble import RandomForestClassifier
import sklearn
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# for keras
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.backend import clear_session
import model
import data_utils
import plotting
import pickle
def assert_same_data(A, B):
# case 0, both loaded
if A['data'] == 'load' and B['data'] == 'load':
assert A['data_load_from'] == B['data_load_from']
data_path = './experiments/data/' + A['data_load_from']
elif A['data'] == 'load' and (not B['data'] == 'load'):
assert A['data_load_from'] == B['identifier']
data_path = './experiments/data/' + A['data_load_from']
elif (not A['data'] == 'load') and B['data'] == 'load':
assert B['data_load_from'] == A['identifier']
data_path = './experiments/data/' + A['identifier']
else:
raise ValueError(A['data'], B['data'])
return data_path
def model_memorisation(identifier, epoch, max_samples=2000, tstr=False):
"""
Compare samples from a model against the training and test sets using MMD
"""
if tstr:
print('Loading data from TSTR experiment (not sampling from model)')
# load pre-generated samples
synth_data = np.load('./experiments/tstr/' + identifier + '_' + str(epoch) + '.data.npy').item()
model_samples = synth_data['samples']
synth_labels = synth_data['labels']
# load real data used in that experiment
real_data = np.load('./experiments/data/' + identifier + '.data.npy').item()
real_samples = real_data['samples']
train = real_samples['train']
test = real_samples['test']
n_samples = test.shape[0]
if model_samples.shape[0] > n_samples:
model_samples = np.random.permutation(model_samples)[:n_samples]
print('Data loaded successfully!')
else:
if identifier == 'cristobal_eICU':
model_samples = pickle.load(open('REDACTED', 'rb'))
samples, labels = data_utils.eICU_task()
train = samples['train'].reshape(-1,16,4)
vali = samples['vali'].reshape(-1,16,4)
test = samples['test'].reshape(-1,16,4)
#train_targets = labels['train']
#vali_targets = labels['vali']
#test_targets = labels['test']
train, vali, test = data_utils.scale_data(train, vali, test)
n_samples = test.shape[0]
if n_samples > max_samples:
n_samples = max_samples
test = np.random.permutation(test)[:n_samples]
if model_samples.shape[0] > n_samples:
model_samples = np.random.permutation(model_samples)[:n_samples]
elif identifier == 'cristobal_MNIST':
the_dir = 'REDACTED'
# pick a random one
which = np.random.choice(['NEW_OK_', '_r4', '_r5', '_r6', '_r7'])
model_samples, model_labels = pickle.load(open(the_dir + 'synth_mnist_minist_cdgan_1_2_100_multivar_14_nolr_rdim3_0_2_' + which + '_190.pk', 'rb'))
# get test and train...
# (generated with fixed seed...)
mnist_resized_dim = 14
samples, labels = data_utils.load_resized_mnist(mnist_resized_dim)
proportions = [0.6, 0.2, 0.2]
train, vali, test, labels_split = data_utils.split(samples, labels=labels, random_seed=1, proportions=proportions)
np.random.seed()
train = train.reshape(-1, 14, 14)
test = test.reshape(-1, 14, 14)
vali = vali.reshape(-1, 14, 14)
n_samples = test.shape[0]
if n_samples > max_samples:
n_samples = max_samples
test = np.random.permutation(test)[:n_samples]
if model_samples.shape[0] > n_samples:
model_samples = np.random.permutation(model_samples)[:n_samples]
else:
settings = json.load(open('./experiments/settings/' + identifier + '.txt', 'r'))
# get the test, train sets
data = np.load('./experiments/data/' + identifier + '.data.npy').item()
train = data['samples']['train']
test = data['samples']['test']
n_samples = test.shape[0]
if n_samples > max_samples:
n_samples = max_samples
test = np.random.permutation(test)[:n_samples]
model_samples = model.sample_trained_model(settings, epoch, n_samples)
all_samples = np.vstack([train, test, model_samples])
heuristic_sigma = mmd.median_pairwise_distance(all_samples)
print('heuristic sigma:', heuristic_sigma)
pvalue, tstat, sigma, MMDXY, MMDXZ = MMD_3_Sample_Test(model_samples, test, np.random.permutation(train)[:n_samples], sigma=heuristic_sigma, computeMMDs=False)
#pvalue, tstat, sigma, MMDXY, MMDXZ = MMD_3_Sample_Test(model_samples, np.random.permutation(train)[:n_samples], test, sigma=heuristic_sigma, computeMMDs=False)
# if pvalue < 0.05:
# print('At confidence level 0.05, we reject the null hypothesis that MMDXY <= MMDXZ, and conclude that the test data has a smaller MMD with the true data than the generated data')
# the function takes (X, Y, Z) as its first arguments, it's testing if MMDXY (i.e. MMD between model and train) is less than MMDXZ (MMD between model and test)
# else:
# print('We have failed to reject the null hypothesis that MMDXY <= MMDXZ, and cannot conclude that the test data has a smaller MMD with the true data than the generated data')
return pvalue, tstat, sigma
def model_comparison(identifier_A, identifier_B, epoch_A=99, epoch_B=99):
"""
Compare two models using relative MMD test
"""
# make sure they used the same data
settings_A = json.load(open('./experiments/settings/' + identifier_A + '.txt', 'r'))
settings_B = json.load(open('./experiments/settings/' + identifier_B + '.txt', 'r'))
data_path = assert_same_data(settings_A, settings_B)
# now load the data
data = np.load(data_path + '.data.npy').item()['samples']['vali']
n_samples = data.shape[0]
A_samples = model.sample_trained_model(settings_A, epoch_A, n_samples)
B_samples = model.sample_trained_model(settings_B, epoch_B, n_samples)
# do the comparison
# TODO: support multiple signals
## some notes about this test:
## MMD_3_Sample_Test(X, Y, Z) tests the hypothesis that Px is closer to Pz than Py
## that is, test the null hypothesis H0:
## MMD(F, Px, Py) <= MMD(F, Px, Pz)
## versus the alternate hypothesis:
## MMD(F, Px, Py) > MMD(F, Px, Pz)
## at significance level that we select later (just the threshold on the p-value)
pvalue, tstat, sigma, MMDXY, MMDXZ = MMD_3_Sample_Test(data[:, :, 0], A_samples[:, :, 0], B_samples[:, :, 0], computeMMDs=True)
print(pvalue, tstat, sigma)
if pvalue < 0.05:
print('At confidence level 0.05, we reject the null hypothesis that MMDXY <= MMDXZ, and conclude that', identifier_B, 'has a smaller MMD with the true data than', identifier_A)
else:
print('We have failed to reject the null hypothesis that MMDXY <= MMDXZ, and cannot conclude that', identifier_B, 'has a smaller MMD with the true data than', identifier_A)
return pvalue, tstat, sigma, MMDXY, MMDXZ
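# Illustrative call (the identifiers are hypothetical experiment names, not files
# shipped with this repo):
#
#   pvalue, tstat, sigma, MMDXY, MMDXZ = model_comparison('expA', 'expB',
#                                                         epoch_A=99, epoch_B=99)
#
# A p-value below the chosen significance level then supports the conclusion that
# expB's samples have a smaller MMD to the real data than expA's.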
# --- to do with reconstruction --- #
def get_reconstruction_errors(identifier, epoch, g_tolerance=0.05, max_samples=1000, rerun=False, tstr=False):
"""
Get the reconstruction error of every point in the training set of a given
experiment.
"""
settings = json.load(open('./experiments/settings/' + identifier + '.txt', 'r'))
if settings['data_load_from']:
data_dict = np.load('./experiments/data/' + settings['data_load_from'] + '.data.npy').item()
else:
data_dict = np.load('./experiments/data/' + identifier + '.data.npy').item()
samples = data_dict['samples']
train = samples['train']
vali = samples['vali']
test = samples['test']
labels = data_dict['labels']
train_labels, test_labels, synth_labels, vali_labels = None, None, None, None
try:
if rerun:
raise FileNotFoundError
errors = np.load('./experiments/eval/' + identifier + '_' + str(epoch) + '_' + str(g_tolerance) + '.reconstruction_errors.npy').item()
train_errors = errors['train']
test_errors = errors['test']
generated_errors = errors['generated']
noisy_errors = errors['noisy']
print('Loaded precomputed errors')
except FileNotFoundError:
if tstr:
synth_data = np.load('./experiments/tstr/' + identifier + '_' + str(epoch) + '.data.npy').item()
generated = synth_data['samples']
synth_labels = synth_data['labels']
train_labels = labels['train']
test_labels = labels['test']
vali_labels = labels['vali']
else:
# generate new data
n_eval = 500
# generate "easy" samples from the distribution
generated = model.sample_trained_model(settings, epoch, n_eval)
# generate "hard' random samples, not from train/test distribution
# TODO: use original validation examples, add noise etc.
## random_samples = np.random.normal(size=generated.shape)
# random_samples -= np.mean(random_samples, axis=0)
# random_samples += np.mean(vali, axis=0)
# random_samples /= np.std(random_samples, axis=0)
# random_samples *= np.std(vali, axis=0)
# get all the errors
print('Getting reconstruction errors on train set')
if train.shape[0] > max_samples:
index_subset = np.random.permutation(train.shape[0])[:max_samples]
train = train[index_subset]
if train_labels is not None:
train_labels = train_labels[index_subset]
train_errors = error_per_sample(identifier, epoch, train, n_rep=5, g_tolerance=g_tolerance, C_samples=train_labels)
print('Getting reconstruction errors on test set')
if test.shape[0] > max_samples:
index_subset = np.random.permutation(test.shape[0])[:max_samples]
test = test[index_subset]
if test_labels is not None:
test_labels = test_labels[index_subset]
test_errors = error_per_sample(identifier, epoch, test, n_rep=5, g_tolerance=g_tolerance, C_samples=test_labels)
D_test, p_test = ks_2samp(train_errors, test_errors)
print('KS statistic and p-value for train v. test errors:', D_test, p_test)
pdb.set_trace()
print('Getting reconstruction errors on generated set')
generated_errors = error_per_sample(identifier, epoch, generated, n_rep=5, g_tolerance=g_tolerance, C_samples=synth_labels)
D_gen, p_gen = ks_2samp(generated_errors, train_errors)
print('KS statistic and p-value for train v. gen errors:', D_gen, p_gen)
D_gentest, p_gentest = ks_2samp(generated_errors, test_errors)
print('KS statistic and p-value for gen v. test errors:', D_gentest, p_gentest)
# print('Getting reconstruction errors on noisy set')
# alpha = 0.5
# noisy_samples = alpha*vali + (1-alpha)*np.random.permutation(vali)
# noisy_errors = error_per_sample(identifier, epoch, noisy_samples, n_rep=5, g_tolerance=g_tolerance, C_samples=vali_labels)
noisy_errors = None
# save!
errors = {'train': train_errors, 'test': test_errors, 'generated': generated_errors, 'noisy': noisy_errors}
np.save('./experiments/eval/' + identifier + '_' + str(epoch) + '_' + str(g_tolerance) + '.reconstruction_errors.npy', errors)
# do two-sample Kolomogorov-Smirnov test for equality
D_test, p_test = ks_2samp(train_errors, test_errors)
print('KS statistic and p-value for train v. test errors:', D_test, p_test)
D_gen, p_gen = ks_2samp(generated_errors, train_errors)
print('KS statistic and p-value for train v. gen errors:', D_gen, p_gen)
D_gentest, p_gentest = ks_2samp(generated_errors, test_errors)
print('KS statistic and p-value for gen v. test errors:', D_gentest, p_gentest)
# visualise distribution of errors for train and test
plotting.reconstruction_errors(identifier + '_' + str(epoch) + '_' + str(g_tolerance), train_errors, test_errors, generated_errors, noisy_errors)
# visualise the "hardest" and "easiest" samples from train
ranking_train = np.argsort(train_errors)
easiest_train = ranking_train[:6]
hardest_train = ranking_train[-6:]
plotting.save_plot_sample(train[easiest_train], epoch, identifier + '_easytrain', n_samples=6, num_epochs=None, ncol=2)
plotting.save_plot_sample(train[hardest_train], epoch, identifier + '_hardtrain', n_samples=6, num_epochs=None, ncol=2)
# visualise the "hardest" and "easiest" samples from random
# ranking_random = np.argsort(noisy_errors)
# easiest_random = ranking_random[:6]
# hardest_random = ranking_random[-6:]
# plotting.save_plot_sample(random_samples[easiest_random], epoch, identifier + '_easyrandom', n_samples=6, num_epochs=None, ncol=2)
# plotting.save_plot_sample(random_samples[hardest_random], epoch, identifier + '_hardrandom', n_samples=6, num_epochs=None, ncol=2)
return True
def error_per_sample(identifier, epoch, samples, n_rep=3, n_iter=None, g_tolerance=0.025, use_min=True, C_samples=None):
"""
Get the reconstruction error per sample, taking the min (or mean) over a few inversion runs
"""
n_samples = samples.shape[0]
heuristic_sigma = np.float32(mmd.median_pairwise_distance(samples))
errors = np.zeros(shape=(n_samples, n_rep))
for rep in range(n_rep):
Z, rep_errors, sigma = model.invert(identifier, epoch, samples, n_iter=n_iter, heuristic_sigma=heuristic_sigma, g_tolerance=g_tolerance, C_samples=C_samples)
errors[:, rep] = rep_errors
# return min, or average?
if use_min:
errors = np.min(errors, axis=1)
else:
# use mean
errors = np.mean(errors, axis=1)
return errors
# --- visualisation evaluation --- #
def view_digit(identifier, epoch, digit, n_samples=6):
"""
Generate a bunch of MNIST digits from a CGAN, view them
"""
settings = json.load(open('./experiments/settings/' + identifier + '.txt', 'r'))
if settings['one_hot']:
assert settings['max_val'] == 1
assert digit <= settings['cond_dim']
C_samples = np.zeros(shape=(n_samples, settings['cond_dim']))
C_samples[:, digit] = 1
else:
assert settings['cond_dim'] == 1
assert digit <= settings['max_val']
C_samples = np.array([digit]*n_samples).reshape(-1, 1)
digit_samples = model.sample_trained_model(settings, epoch, n_samples, Z_samples=None, cond_dim=settings['cond_dim'], C_samples=C_samples)
digit_samples = digit_samples.reshape(n_samples, -1, 1)
# visualise
plotting.save_mnist_plot_sample(digit_samples, digit, identifier + '_' + str(epoch) + '_digit_', n_samples)
return True
def view_interpolation(identifier, epoch, n_steps=6, input_samples=None, e_tolerance=0.01, sigma=3.29286853021):
"""
If samples: generate interpolation between real points
Else:
Sample two points in the latent space, view a linear interpolation between them.
"""
settings = json.load(open('./experiments/settings/' + identifier + '.txt', 'r'))
if input_samples is None:
# grab two training examples
data = np.load('./experiments/data/' + identifier + '.data.npy').item()
train = data['samples']['train']
input_samples = np.random.permutation(train)[:2]
# Z_sampleA, Z_sampleB = model.sample_Z(2, settings['seq_length'], settings['latent_dim'],
# settings['use_time'])
if sigma is None:
## gotta get a sigma somehow
sigma = mmd.median_pairwise_distance(train)
print('Calculated heuristic sigma from training data:', sigma)
Zs, error, _ = model.invert(settings, epoch, input_samples, e_tolerance=e_tolerance)
Z_sampleA, Z_sampleB = Zs
Z_samples = plotting.interpolate(Z_sampleA, Z_sampleB, n_steps=n_steps)
samples = model.sample_trained_model(settings, epoch, Z_samples.shape[0], Z_samples)
# get distances from generated samples to target samples
d_A, d_B = [], []
for sample in samples:
d_A.append(sample_distance(sample, samples[0], sigma))
d_B.append(sample_distance(sample, samples[-1], sigma))
distances = pd.DataFrame({'dA': d_A, 'dB': d_B})
plotting.save_plot_interpolate(input_samples, samples, epoch, settings['identifier'] + '_epoch' + str(epoch), distances=distances, sigma=sigma)
return True
def view_latent_vary(identifier, epoch, n_steps=6):
settings = json.load(open('./experiments/settings/' + identifier + '.txt', 'r'))
Z_sample = model.sample_Z(1, settings['seq_length'], settings['latent_dim'],
settings['use_time'])[0]
samples_dim = []
for dim in range(settings['latent_dim']):
Z_samples_dim = plotting.vary_latent_dimension(Z_sample, dim, n_steps)
samples_dim.append(model.sample_trained_model(settings, epoch, Z_samples_dim.shape[0], Z_samples_dim))
plotting.save_plot_vary_dimension(samples_dim, epoch, settings['identifier'] + '_varydim', n_dim=settings['latent_dim'])
return True
def view_reconstruction(identifier, epoch, real_samples, tolerance=1):
"""
Given a set of real samples, find the "closest" latent space points
corresponding to them, generate samples from these, visualise!
"""
settings = json.load(open('./experiments/settings/' + identifier + '.txt', 'r'))
Zs, error, sigma = model.invert(settings, epoch, real_samples, tolerance=tolerance)
plotting.visualise_latent(Zs[0], identifier+'_' + str(epoch) + '_0')
plotting.visualise_latent(Zs[1], identifier+'_' + str(epoch) + '_1')
model_samples = model.sample_trained_model(settings, epoch, Zs.shape[0], Zs)
plotting.save_plot_reconstruct(real_samples, model_samples, settings['identifier'])
return True
def view_fixed(identifier, epoch, n_samples=6, dim=None):
""" What happens when we give the same point at each time step? """
settings = json.load(open('./experiments/settings/' + identifier + '.txt', 'r'))
Z_samples = model.sample_Z(n_samples, settings['seq_length'], settings['latent_dim'],
settings['use_time'])
# now, propagate forward the value at time 0 (which time doesn't matter)
for i in range(1, settings['seq_length']):
if dim is None:
Z_samples[:, i, :] = Z_samples[:, 0, :]
else:
Z_samples[:, i, dim] = Z_samples[:, 0, dim]
# now generate
samples = model.sample_trained_model(settings, epoch, n_samples, Z_samples)
# now visualise
plotting.save_plot_sample(samples, epoch, identifier + '_fixed', n_samples)
return True
def view_params(identifier, epoch):
""" Visualise weight matrices in the GAN """
settings = json.load(open('./experiments/settings/' + identifier + '.txt', 'r'))
parameters = model.load_parameters(identifier + '_' + str(epoch))
plotting.plot_parameters(parameters, identifier + '_' + str(epoch))
return True
# --- to do with samples --- #
def sample_distance(sampleA, sampleB, sigma):
"""
I know this isn't the best distance measure, alright.
"""
# RBF!
gamma = 1 / (2 * sigma**2)
similarity = np.exp(-gamma*(np.linalg.norm(sampleA - sampleB)**2))
distance = 1 - similarity
return distance
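# Worked example (assuming sigma = 1, so gamma = 0.5): identical samples give
# distance 0, while samples with ||a - b|| = 2 give 1 - exp(-0.5 * 4) ~= 0.86,
# i.e. the distance saturates towards 1 as samples move apart.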
### --- TSTR ---- ###
def train_CNN(train_X, train_Y, vali_X, vali_Y, test_X):
"""
Train a CNN (code copied/adapted from Cristobal's mnist_keras_trts_0_2)
(ONLY MNIST, ONLY 14x14)
(ONLY DIGITS UP TO 3)
"""
print('Training CNN!')
input_shape = (14,14,1)
batch_size = 128
num_classes = 3
epochs = 1000
m = Sequential()
m.add(Conv2D(16, kernel_size=(3, 3),
activation='relu',
input_shape=input_shape))
m.add(Conv2D(32, (3, 3), activation='relu'))
m.add(MaxPooling2D(pool_size=(2, 2)))
m.add(Dropout(0.25))
m.add(Flatten())
m.add(Dense(128, activation='relu'))
m.add(Dropout(0.5))
m.add(Dense(num_classes, activation='softmax'))
m.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
earlyStopping=keras.callbacks.EarlyStopping(monitor='val_loss', patience=0, verbose=1, mode='auto')
m.fit(np.expand_dims(train_X, axis=-1), train_Y,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(np.expand_dims(vali_X, axis=-1), vali_Y),
callbacks=[earlyStopping])
test_predictions = m.predict(np.expand_dims(test_X, axis=-1))
return test_predictions
def TSTR_mnist(identifier, epoch, generate=True, duplicate_synth=1, vali=True, CNN=False, reverse=False):
"""
Either load or generate synthetic training, real test data...
Load synthetic training, real test data, do multi-class SVM
(basically just this: http://scikit-learn.org/stable/auto_examples/classification/plot_digits_classification.html)
If reverse = True: do TRTS
"""
print('Running TSTR on', identifier, 'at epoch', epoch)
if vali:
test_set = 'vali'
else:
test_set = 'test'
if generate:
data = np.load('./experiments/data/' + identifier + '.data.npy').item()
samples = data['samples']
train_X = samples['train']
test_X = samples[test_set]
labels = data['labels']
train_Y = labels['train']
test_Y = labels[test_set]
# now sample from the model
synth_Y = np.tile(train_Y, [duplicate_synth, 1])
synth_X = model.sample_trained_model(identifier, epoch, num_samples=synth_Y.shape[0], C_samples=synth_Y)
# for use in TRTS
synth_testX = model.sample_trained_model(identifier, epoch, num_samples=test_Y.shape[0], C_samples=test_Y)
synth_data = {'samples': synth_X, 'labels': synth_Y, 'test_samples': synth_testX, 'test_labels': test_Y}
np.save('./experiments/tstr/' + identifier + '_' + str(epoch) + '.data.npy', synth_data)
else:
print('Loading synthetic data from pre-sampled model')
exp_data = np.load('./experiments/tstr/' + identifier + '_' + str(epoch) + '.data.npy').item()
test_X, test_Y = exp_data['test_data'], exp_data['test_labels']
train_X, train_Y = exp_data['train_data'], exp_data['train_labels']
synth_X, synth_Y = exp_data['synth_data'], exp_data['synth_labels']
if reverse:
which_setting = 'trts'
print('Swapping synthetic test set in for real, to do TRTS!')
test_X = synth_testX
else:
print('Doing normal TSTR')
which_setting = 'tstr'
# make classifier
if not CNN:
model_choice = 'RF'
# if multivariate, reshape
if len(test_X.shape) == 3:
test_X = test_X.reshape(test_X.shape[0], -1)
if len(train_X.shape) == 3:
train_X = train_X.reshape(train_X.shape[0], -1)
if len(synth_X.shape) == 3:
synth_X = synth_X.reshape(synth_X.shape[0], -1)
# if one hot, fix
if len(synth_Y.shape) > 1 and not synth_Y.shape[1] == 1:
synth_Y = np.argmax(synth_Y, axis=1)
train_Y = np.argmax(train_Y, axis=1)
test_Y = np.argmax(test_Y, axis=1)
# random forest
#synth_classifier = SVC(gamma=0.001)
#real_classifier = SVC(gamma=0.001)
synth_classifier = RandomForestClassifier(n_estimators=500)
real_classifier = RandomForestClassifier(n_estimators=500)
# fit
real_classifier.fit(train_X, train_Y)
synth_classifier.fit(synth_X, synth_Y)
# test on real
synth_predY = synth_classifier.predict(test_X)
real_predY = real_classifier.predict(test_X)
else:
model_choice = 'CNN'
synth_predY = train_CNN(synth_X, synth_Y, samples['vali'], labels['vali'], test_X)
clear_session()
real_predY = train_CNN(train_X, train_Y, samples['vali'], labels['vali'], test_X)
clear_session()
# CNN setting is all 'one-hot'
test_Y = np.argmax(test_Y, axis=1)
synth_predY = np.argmax(synth_predY, axis=1)
real_predY = np.argmax(real_predY, axis=1)
# report on results
synth_prec, synth_recall, synth_f1, synth_support = precision_recall_fscore_support(test_Y, synth_predY, average='weighted')
synth_accuracy = accuracy_score(test_Y, synth_predY)
synth_auprc = 'NaN'
synth_auroc = 'NaN'
synth_scores = [synth_prec, synth_recall, synth_f1, synth_accuracy, synth_auprc, synth_auroc]
real_prec, real_recall, real_f1, real_support = precision_recall_fscore_support(test_Y, real_predY, average='weighted')
real_accuracy = accuracy_score(test_Y, real_predY)
real_auprc = 'NaN'
real_auroc = 'NaN'
real_scores = [real_prec, real_recall, real_f1, real_accuracy, real_auprc, real_auroc]
all_scores = synth_scores + real_scores
if vali:
report_file = open('./experiments/tstr/vali.' + which_setting + '_report.v3.csv', 'a')
report_file.write('mnist,' + identifier + ',' + model_choice + ',' + str(epoch) + ',' + ','.join(map(str, all_scores)) + '\n')
report_file.close()
else:
report_file = open('./experiments/tstr/' + which_setting + '_report.v3.csv', 'a')
report_file.write('mnist,' + identifier + ',' + model_choice + ',' + str(epoch) + ',' + ','.join(map(str, all_scores)) + '\n')
report_file.close()
# visualise results
try:
plotting.view_mnist_eval(identifier + '_' + str(epoch), train_X, train_Y, synth_X, synth_Y, test_X, test_Y, synth_predY, real_predY)
except ValueError:
print('PLOTTING ERROR')
pdb.set_trace()
print(classification_report(test_Y, synth_predY))
print(classification_report(test_Y, real_predY))
return synth_f1, real_f1
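# Illustrative call (the identifier is a hypothetical experiment name):
#
#   synth_f1, real_f1 = TSTR_mnist('mnist_cgan_run', epoch=99, generate=True)
#
# TSTR trains the classifier on synthetic data and tests on real data; passing
# reverse=True instead evaluates TRTS (train on real data, test on synthetic).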
def TSTR_eICU(identifier, epoch, generate=True, vali=True, CNN=False, do_OR=False, duplicate_synth=1, reverse=False):
"""
"""
if vali:
test_set = 'vali'
else:
test_set = 'test'
data = np.load('./experiments/data/' + identifier + '.data.npy').item()
samples = data['samples']
train_X = samples['train']
test_X = samples[test_set]
labels = data['labels']
train_Y = labels['train']
test_Y = labels[test_set]
if generate:
# now sample from the model
synth_Y = np.tile(train_Y, [duplicate_synth, 1])
synth_X = model.sample_trained_model(identifier, epoch, num_samples=synth_Y.shape[0], C_samples=synth_Y)
# for use in TRTS
synth_testX = model.sample_trained_model(identifier, epoch, num_samples=test_Y.shape[0], C_samples=test_Y)
synth_data = {'samples': synth_X, 'labels': synth_Y, 'test_samples': synth_testX, 'test_labels': test_Y}
np.save('./experiments/tstr/' + identifier + '_' + str(epoch) + '.data.npy', synth_data)
else:
print('Loading pre-generated data')
print('WARNING: not implemented for TRTS')
# get "train" data
exp_data = np.load('./experiments/tstr/' + identifier + '_' + str(epoch) + '.data.npy').item()
synth_X = exp_data['samples']
synth_Y = exp_data['labels']
n_synth = synth_X.shape[0]
synth_X = synth_X.reshape(n_synth, -1)
# pdb.set_trace()
# # ALERT ALERT MODIFYING
# synth_X = 2*(synth_X > 0) - 1
orig_data = np.load('/cluster/home/hyland/eICU_task_data.npy')
import numpy as np
import matplotlib.pyplot as plt
from os import makedirs
from os.path import isfile, exists
from scipy.constants import mu_0
# from numba import njit
def calcDipolMomentAnalytical(remanence, volume):
""" Calculating the magnetic moment from the remanence in T and the volume in m^3"""
m = remanence * volume / mu_0 # [A * m^2]
return m
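# Worked example: a 1 cm^3 magnet (1e-6 m^3) with a remanence of 1.35 T (value
# chosen for illustration) has m = 1.35 * 1e-6 / mu_0 ~= 1.07 A*m^2.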
def plotSimple(data, FOV, fig, ax, cbar=True, **args):
""" Generate simple colorcoded plot of 2D grid data with contour. Returns axes object."""
im = ax.imshow(data, extent=FOV, origin="lower", **args)
cs = ax.contour(data, colors="k", extent=FOV, origin="lower", linestyles="dotted")
class nf(float):
def __repr__(self):
s = f"{self:.1f}"
return f"{self:.0f}" if s[-1] == "0" else s
cs.levels = [nf(val) for val in cs.levels]
if plt.rcParams["text.usetex"]:
fmt = r"%r"
else:
fmt = "%r"
ax.clabel(cs, cs.levels, inline=True, fmt=fmt, fontsize=10)
if cbar == True:
fig.colorbar(im, ax=ax)
return im
def centerCut(field, axis):
"""return a slice of the data at the center for the specified axis"""
dims = np.shape(field)
return np.take(field, indices=int(dims[axis] / 2), axis=axis)
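# Example: for a field of shape (41, 41, 41), centerCut(field, 2) returns the
# (41, 41) slice at index 20 along the last axis, i.e. the central z-plane.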
def isHarmonic(field, sphericalMask, shellMask):
"""Checks if the extrema of the field are in the shell."""
fullField = np.multiply(field, sphericalMask) # [T]
reducedField = np.multiply(field, shellMask)
if int(ptpPPM(fullField)) > int(ptpPPM(reducedField)):
print(
"ptpPPM of field:",
ptpPPM(fullField),
"ptpPPM on surface",
ptpPPM(reducedField),
)
print("Masked field is NOT a harmonic function...")
return False
else:
print(
"ptpPPM of field:",
ptpPPM(fullField),
"ptpPPM on surface",
ptpPPM(reducedField),
)
print("Masked field is harmonic.")
sizeSpherical = int(np.nansum(sphericalMask))
sizeShell = int(np.nansum(shellMask))
print(
"Reduced size of field from {} to {} ({}%)".format(
sizeSpherical, sizeShell, int(100 * sizeShell / sizeSpherical)
)
)
return True
def genQmesh(field, resolution):
"""Generate a mesh of quadratic coordinates"""
mask = np.zeros(np.shape(field))
xAxis = np.linspace(
-(np.size(field, 0) - 1) * resolution / 2,
(np.size(field, 0) - 1) * resolution / 2,
np.size(field, 0),
)
yAxis = np.linspace(
-(np.size(field, 1) - 1) * resolution / 2,
(np.size(field, 1) - 1) * resolution / 2,
np.size(field, 1),
)
zAxis = np.linspace(
-(np.size(field, 2) - 1) * resolution / 2,
(np.size(field, 2) - 1) * resolution / 2,
np.size(field, 2),
)
xAxis, yAxis, zAxis = np.meshgrid(xAxis, yAxis, zAxis)
xAxisSquare = np.square(xAxis)
yAxisSquare = np.square(yAxis)
zAxisSquare = np.square(zAxis)
return mask, xAxisSquare, yAxisSquare, zAxisSquare
def genMask(
field, resolution, diameter=False, shellThickness=False, axis=False, debug=False
):
"""Generate a mask for a spherical shell"""
mask, xAxisSquare, yAxisSquare, zAxisSquare = genQmesh(field, resolution)
if (shellThickness != False) and (diameter != False):
if debug == True:
print(
"Creating shell mask. (resolution = {}, diameter = {}, shellThickness = {})".format(
resolution, diameter, shellThickness
)
)
print("The shell is added inside the sphere surface!")
rAxisSquare = xAxisSquare + yAxisSquare + zAxisSquare
innerRadiusSquare = (diameter / 2 - shellThickness) ** 2
outerRadiusSquare = (diameter / 2) ** 2
mask[
(rAxisSquare <= outerRadiusSquare) & (rAxisSquare >= innerRadiusSquare)
] = 1
mask[mask == 0] = "NaN"
return mask
def genSphericalMask(field, diameter, resolution):
"""generate spherical mask
with >>diameter<<
for a >>field<< and a given >>resolution<<
"""
mask, xAxisSquare, yAxisSquare, zAxisSquare = genQmesh(field, resolution)
mask[xAxisSquare + yAxisSquare + zAxisSquare <= (diameter / 2) ** 2] = 1
mask[mask == 0] = "NaN"
return mask
def genSliceMask(field, diameter, resolution, axis="x"):
"""generate mask for a circular slice
with >>diameter<<
for a >>field<< and a given >>resolution<<
Every input variable has to have the same unit (mm or m or ...)
"""
mask, xAxisSquare, yAxisSquare, zAxisSquare = genQmesh(field, resolution)
if axis == "z":
mask[
(xAxisSquare + yAxisSquare <= (diameter / 2) ** 2) & (zAxisSquare == 0)
] = 1
if axis == "y":
mask[
(xAxisSquare + zAxisSquare <= (diameter / 2) ** 2) & (yAxisSquare == 0)
] = 1
if axis == "x":
mask[
(yAxisSquare + zAxisSquare <= (diameter / 2) ** 2) & (xAxisSquare == 0)
] = 1
mask[mask == 0] = "NaN"
return mask
def genEllipseSliceMask(field, a, b, resolution, axis="x"):
"""generate mask for a circulat slice
with >>diameter<<
for a >>field<< and a given >>resolution<<
Every input variable has to have the same unit (mm or m or ...)
"""
# generate spherical mask
mask, xAxisSquare, yAxisSquare, zAxisSquare = genQmesh(field, resolution)
if axis == "z":
mask[
(xAxisSquare / (a / 2) ** 2 + yAxisSquare / (b / 2) ** 2 <= 1)
& (zAxisSquare == 0)
] = 1
elif axis == "y":
mask[
(xAxisSquare / (a / 2) ** 2 + zAxisSquare / (b / 2) ** 2 <= 1)
& (yAxisSquare == 0)
] = 1
elif axis == "x":
mask[
(yAxisSquare / (a / 2) ** 2 + zAxisSquare / (b / 2) ** 2 <= 1)
& (xAxisSquare == 0)
] = 1
mask[mask == 0] = "NaN"
return mask
def ptpPPM(field):
"""Calculate the peak-to-peak homogeneity in ppm."""
return 1e6 * (np.nanmax(field) - np.nanmin(field)) / np.nanmean(field)
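# Sketch of how the mask helpers combine with ptpPPM/isHarmonic (values are
# illustrative; `field` is assumed to be a 3D B0 map and `resolution` the grid
# spacing in the same length unit as the diameters):
#
#   sphere = genSphericalMask(field, diameter=40, resolution=1)
#   shell = genMask(field, resolution=1, diameter=40, shellThickness=2)
#   isHarmonic(field, sphere, shell)            # extrema should lie on the shell
#   print(ptpPPM(np.multiply(field, sphere)))   # peak-to-peak homogeneity in ppm
#
# As a numeric example, a field spanning 49.999-50.001 mT with a 50 mT mean gives
# ptpPPM = 1e6 * 0.002 / 50 = 40 ppm.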
def saveParameters(parameters, folder):
"""Saving a dict to the file parameters.npy .
If the file exists it is updated, provided the parameters are not already stored.
__future__: Fix use case: some parameters in the dict are identical to the
stored ones and some are new!
"""
try:
print("Saving parameters to file...", end=" ")
print("\x1b[6;30;42m", *parameters.keys(), "\x1b[0m", end=" ")
oldParameters = loadParameters(folder)
if parameters.items() <= oldParameters.items():
print(" ... the parameters are already saved and identical.")
elif set(parameters).issubset(
set(oldParameters)
): # here just keys are compared!
print(
" ...\x1b[6;37;41m"
+ " parameters are NOT saved. Other parameters are stored. Please cleanup! "
+ "\x1b[0m"
)
else:
oldParameters.update(parameters)
np.save(folder + "/parameters", oldParameters)
print(" ... added.")
except (FileNotFoundError, AttributeError):
np.save(folder + "/parameters", parameters)
oldParameters = parameters
# print('The following parameters are currently stored:\n', *oldParameters.keys())
def loadParameters(folder):
return np.load(folder + "/parameters.npy", allow_pickle=True).item()
def loadParameter(key, folder):
return loadParameters(folder)[key]
def displayParameters(folder):
print(loadParameters(folder))
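# Usage sketch for the parameter persistence helpers (the folder name is
# hypothetical); repeated calls merge new keys into <folder>/parameters.npy:
#
#   saveParameters({"resolution": 1000, "diameter": 0.04}, "experiments/run1")
#   res = loadParameter("resolution", "experiments/run1")
#   displayParameters("experiments/run1")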
def createShimfieldsShimRingV2(
numMagnets=(32, 44),
rings=4,
radii=(0.074, 0.097),
zRange=(-0.08, -0.039, 0.039, 0.08),
resolution=1000,
kValue=2,
simDimensions=(0.04, 0.04, 0.04),
numRotations=2,
):
""" Calculating the magnetic field distributions for a single or multiple Halbach Rings.
This has to be multiplied with the magnetic moment amplitude of a magnet to get the real distribution
For every magnet position we simulate numRotations equally spaced rotations (e.g. 0°, 45°, 90°, 135° for numRotations=4). This has to be considered in the cost function,
otherwise two magnets are placed in one position
resolution is the number of grid points per unit length in one dimension
"""
mu = mu_0
# positioning of the magnets in a circle
if len(zRange) == 2:
rings = np.linspace(zRange[0], zRange[1], rings)
elif rings == len(zRange):
rings = np.array(zRange)
else:
print("No clear definition how to place shims...")
rotation_elements = np.linspace(0, np.pi, numRotations, endpoint=False)
# create array to store field data
count = 0
if type(numMagnets) in (list, tuple):
totalNumMagnets = np.sum(numMagnets) * np.size(rings) * numRotations
else:
totalNumMagnets = numMagnets * np.size(rings) * numRotations * len(radii)
print(totalNumMagnets, numMagnets, np.size(rings), np.size(numRotations))
shimFields = np.zeros(
(
int(simDimensions[0] * resolution) + 1,
int(simDimensions[1] * resolution) + 1,
int(simDimensions[2] * resolution) + 1,
3,
totalNumMagnets,
),
dtype=np.float32,
)
for rotation in rotation_elements:
# create halbach array
for row in rings:
for i, radius in enumerate(radii):
angle_elements = np.linspace(
-np.pi, np.pi, numMagnets[i], endpoint=False
)
for angle in angle_elements:
print(
"Simulating magnet "
+ str(count + 1)
+ " of "
+ str(totalNumMagnets),
end="\t",
)
position = (row, radius * np.cos(angle), radius * np.sin(angle))
print(
"@ position {:2.2},\t {:2.2},\t {:2.2}".format(*position),
end="\r",
)
angle = kValue * angle + rotation
dip_vec = [0, np.sin(angle), -np.cos(angle)]
dip_vec = np.multiply(dip_vec, mu)
dip_vec = np.divide(dip_vec, 4 * np.pi)
# create mesh coordinates
x = np.linspace(
-simDimensions[0] / 2 + position[0],
simDimensions[0] / 2 + position[0],
int(simDimensions[0] * resolution) + 1,
dtype=np.float32,
)
y = np.linspace(
-simDimensions[1] / 2 + position[1],
simDimensions[1] / 2 + position[1],
int(simDimensions[1] * resolution) + 1,
dtype=np.float32,
)
z = np.linspace(
-simDimensions[2] / 2 + position[2],
simDimensions[2] / 2 + position[2],
int(simDimensions[2] * resolution) + 1,
dtype=np.float32,
)
x, y, z = np.meshgrid(x, y, z)
vec_dot_dip = 3 * (y * dip_vec[1] + z * dip_vec[2])
# calculate the distance of each mesh point to magnet, optimised for speed
# for improved memory performance move in to b0 calculations
vec_mag = np.square(x) + np.square(y) + np.square(z)
# if the magnet is in the origin, we divide by 0, therefore we set it to nan to
# avoid getting an error. if this has any effect on speed just leave it out
# as we do not care about the values outside of the FOV and even less inside the magnets
vec_mag[(vec_mag <= 1e-15) & (vec_mag >= -1e-15)] = "NaN"
vec_mag_3 = np.power(vec_mag, 1.5)
vec_mag_5 = np.power(vec_mag, 2.5)
del vec_mag
# calculate contributions of magnet to total field, dipole always points in yz plane
# so second term is zero for the x component
shimFields[:, :, :, 0, count] = np.divide(
np.multiply(x, vec_dot_dip), vec_mag_5
)
shimFields[:, :, :, 1, count] = np.divide(
np.multiply(y, vec_dot_dip), vec_mag_5
) - np.divide(dip_vec[1], vec_mag_3)
shimFields[:, :, :, 2, count] = np.divide(
np.multiply(z, vec_dot_dip), vec_mag_5
) - np.divide(dip_vec[2], vec_mag_3)
count += 1
print(
"All magnets are simulated, the shim field array has shape:",
np.shape(shimFields),
"\t\t\t",
)
return shimFields.swapaxes(
0, 1
) # using i,j indexing as the other is too confusing....
def createShimfieldsDoubleRings(
numMagnets=72,
rings=1,
radii=(0.115, 0.12),
zRange=(0, 0),
resolution=1000,
kValue=2,
simDimensions=(0.04, 0.04, 0.04),
numRotations=4,
):
""" Calculating the magnetic field distributions for a single or multiple Halbach Rings.
This has to be multiplied with the magnetic moment amplitude of a magnet to get the real distribution
For every magnet position we set 4 different rotations: 0°, 45°, 90°, 135°. This has to be considered in the cost function
otherwise two magnets are placed in one position
resolution is the number of grid points per unit length in one dimension
"""
mu = mu_0
# positioning of the magnets in a circle
if len(zRange) == 2:
rings = np.linspace(zRange[0], zRange[1], rings)
elif rings == len(zRange):
rings = np.array(zRange)
else:
print("No clear definition how to place shims...")
rotation_elements = np.linspace(0, np.pi, numRotations, endpoint=False)
# create array to store field data
count = 0
totalNumMagnets = numMagnets * np.size(rings) * numRotations * len(radii)
print(totalNumMagnets, numMagnets, np.size(rings), np.size(numRotations))
shimFields = np.zeros(
(
int(simDimensions[0] * resolution) + 1,
int(simDimensions[1] * resolution) + 1,
int(simDimensions[2] * resolution) + 1,
3,
totalNumMagnets,
),
dtype=np.float32,
)
for rotation in rotation_elements:
angle_elements = np.linspace(-np.pi, np.pi, numMagnets, endpoint=False)
# create halbach array
for row in rings:
for angle in angle_elements:
for radius in radii:
print(
"Simulating magnet "
+ str(count + 1)
+ " of "
+ str(totalNumMagnets),
end="\t",
)
position = (row, radius * np.cos(angle), radius * np.sin(angle))
print(
"@ position {:2.2},\t {:2.2},\t {:2.2}".format(*position),
end="\r",
)
angle = kValue * angle + rotation
dip_vec = [0, np.sin(angle), -np.cos(angle)]
dip_vec = np.multiply(dip_vec, mu)
dip_vec = np.divide(dip_vec, 4 * np.pi)
# create mesh coordinates
x = np.linspace(
-simDimensions[0] / 2 + position[0],
simDimensions[0] / 2 + position[0],
int(simDimensions[0] * resolution) + 1,
dtype=np.float32,
)
y = np.linspace(
-simDimensions[1] / 2 + position[1],
simDimensions[1] / 2 + position[1],
int(simDimensions[1] * resolution) + 1,
dtype=np.float32,
)
z = np.linspace(
-simDimensions[2] / 2 + position[2],
simDimensions[2] / 2 + position[2],
int(simDimensions[2] * resolution) + 1,
dtype=np.float32,
)
x, y, z = np.meshgrid(x, y, z)
vec_dot_dip = 3 * (y * dip_vec[1] + z * dip_vec[2])
# calculate the distance of each mesh point to magnet, optimised for speed
# for improved memory performance move in to b0 calculations
vec_mag = np.square(x) + np.square(y) + np.square(z)
# if the magnet is in the origin, we divide by 0, therefore we set it to nan to
# avoid getting an error. if this has any effect on speed just leave it out
# as we do not care about the values outside of the FOV and even less inside the magnets
vec_mag[(vec_mag <= 1e-15) & (vec_mag >= -1e-15)] = "NaN"
vec_mag_3 = np.power(vec_mag, 1.5)
vec_mag_5 = np.power(vec_mag, 2.5)
del vec_mag
# calculate contributions of magnet to total field, dipole always points in yz plane
# so second term is zero for the x component
shimFields[:, :, :, 0, count] = np.divide(
np.multiply(x, vec_dot_dip), vec_mag_5
)
shimFields[:, :, :, 1, count] = np.divide(
np.multiply(y, vec_dot_dip), vec_mag_5
) - np.divide(dip_vec[1], vec_mag_3)
shimFields[:, :, :, 2, count] = np.divide(
np.multiply(z, vec_dot_dip), vec_mag_5
) - np.divide(dip_vec[2], vec_mag_3)
count += 1
print(
"All magnets are simulated, the shim field array has shape:",
np.shape(shimFields),
"\t\t\t",
)
return shimFields.swapaxes(
0, 1
) # using i,j indexing as the other is too confusing....
def createShimfields(
numMagnets=72,
rings=1,
radius=0.115,
zRange=(0, 0),
resolution=1000,
kValue=2,
simDimensions=(0.04, 0.04, 0.04),
numRotations=4,
):
""" Calculating the magnetic field distributions for a single or multiple Halbach Rings.
This has to be multiplied with the magnetic moment amplitude of a magnet to get the real distribution
For every magnet position we set 4 different rotations: 0°, 45°, 90°, 135°. This has to be considered in the cost function
otherwise two magnets are placed in one position
resolution is the number of grid points per unit length in one dimension
"""
mu = mu_0
# positioning of the magnets in a circle
if len(zRange) == 2:
rings = np.linspace(zRange[0], zRange[1], rings)
elif rings == len(zRange):
rings = np.array(zRange)
else:
print("No clear definition how to place shims...")
rotation_elements = np.linspace(0, np.pi, numRotations, endpoint=False)
# create array to store field data
count = 0
totalNumMagnets = numMagnets * np.size(rings) * numRotations
"""
Test Surrogates Overview
========================
"""
# Author: <NAME> <<EMAIL>>
# License: new BSD
from PIL import Image
import numpy as np
import scripts.surrogates_overview as exo
import scripts.image_classifier as imgclf
import sklearn.datasets
import sklearn.linear_model
SAMPLES = 10
BATCH = 50
SAMPLE_IRIS = False
IRIS_SAMPLES = 50000
def test_bilmey_image():
"""Tests surrogate image bLIMEy."""
# Load the image
doggo_img = Image.open('surrogates_overview/img/doggo.jpg')
doggo_array = np.array(doggo_img)
# Load the classifier
clf = imgclf.ImageClassifier()
explain_classes = [('tennis ball', 852),
('golden retriever', 207),
('Labrador retriever', 208)]
# Configure widgets to select occlusion colour, segmentation granularity
# and explained class
colour_selection = {
i: i for i in ['mean', 'black', 'white', 'randomise-patch', 'green']
}
granularity_selection = {'low': 13, 'medium': 30, 'high': 50}
# Generate explanations
blimey_image_collection = {}
for gran_name, gran_number in granularity_selection.items():
blimey_image_collection[gran_name] = {}
for col_name in colour_selection:
blimey_image_collection[gran_name][col_name] = \
exo.build_image_blimey(
doggo_array,
clf.predict_proba,
explain_classes,
explanation_size=5,
segments_number=gran_number,
occlusion_colour=col_name,
samples_number=SAMPLES,
batch_size=BATCH,
random_seed=42)
exp = []
for gran_ in blimey_image_collection:
for col_ in blimey_image_collection[gran_]:
exp.append(blimey_image_collection[gran_][col_]['surrogates'])
assert len(exp) == len(EXP_IMG)
for e, E in zip(exp, EXP_IMG):
assert sorted(list(e.keys())) == sorted(list(E.keys()))
for key in e.keys():
assert e[key]['name'] == E[key]['name']
assert len(e[key]['explanation']) == len(E[key]['explanation'])
for e_, E_ in zip(e[key]['explanation'], E[key]['explanation']):
assert e_[0] == E_[0]
assert np.allclose(e_[1], E_[1], atol=.001, equal_nan=True)
def test_bilmey_tabular():
"""Tests surrogate tabular bLIMEy."""
# Load the iris data set
iris = sklearn.datasets.load_iris()
iris_X = iris.data # [:, :2] # take the first two features only
iris_y = iris.target
iris_labels = iris.target_names
iris_feature_names = iris.feature_names
label2class = {lab: i for i, lab in enumerate(iris_labels)}
# Fit the classifier
logreg = sklearn.linear_model.LogisticRegression(C=1e5)
logreg.fit(iris_X, iris_y)
# explained class
_dtype = iris_X.dtype
explained_instances = {
'setosa': np.array([5, 3.5, 1.5, 0.25]).astype(_dtype),
'versicolor': np.array([5.5, 2.75, 4.5, 1.25]).astype(_dtype),
'virginica': np.array([7, 3, 5.5, 2.25]).astype(_dtype)
}
petal_length_idx = iris_feature_names.index('petal length (cm)')
petal_length_bins = [1, 2, 3, 4, 5, 6, 7]
petal_width_idx = iris_feature_names.index('petal width (cm)')
petal_width_bins = [0, .5, 1, 1.5, 2, 2.5]
discs_ = []
for i, ix in enumerate(petal_length_bins): # X-axis
for iix in petal_length_bins[i + 1:]:
for j, jy in enumerate(petal_width_bins): # Y-axis
for jjy in petal_width_bins[j + 1:]:
discs_.append({
petal_length_idx: [ix, iix],
petal_width_idx: [jy, jjy]
})
for inst_i in explained_instances:
for cls_i in iris_labels:
for disc_i, disc in enumerate(discs_):
inst = explained_instances[inst_i]
cls = label2class[cls_i]
exp = exo.build_tabular_blimey(
inst, cls, iris_X, iris_y, logreg.predict_proba, disc,
IRIS_SAMPLES, SAMPLE_IRIS, 42)
key = '{}&{}&{}'.format(inst_i, cls, disc_i)
exp_ = EXP_TAB[key]
assert exp['explanation'].shape[0] == exp_.shape[0]
assert np.allclose(
exp['explanation'], exp_, atol=.001, equal_nan=True)
EXP_IMG = [
{207: {'explanation': [(13, -0.24406872165780585),
(11, -0.20456180387430317),
(9, -0.1866779131424261),
(4, 0.15001224157793785),
(3, 0.11589480417160983)],
'name': 'golden retriever'},
208: {'explanation': [(13, -0.08395966359346249),
(0, -0.0644986107387837),
(9, 0.05845584633658977),
(1, 0.04369763085720947),
(11, -0.035958188394941866)],
'name': '<NAME>'},
852: {'explanation': [(13, 0.3463529698715463),
(11, 0.2678050131923326),
(4, -0.10639863421417416),
(6, 0.08345792378117327),
(9, 0.07366945242386444)],
'name': '<NAME>'}},
{207: {'explanation': [(13, -0.0624167912596456),
(7, 0.06083359545295548),
(3, 0.0495953943686462),
(11, -0.04819787147412231),
(2, -0.03858823761391199)],
'name': '<NAME>'},
208: {'explanation': [(13, -0.08408428146916162),
(7, 0.07704235920590158),
(3, 0.06646468388122273),
(11, -0.0638326572126609),
(2, -0.052621478002380796)],
'name': '<NAME>'},
852: {'explanation': [(11, 0.35248212611685886),
(13, 0.2516925608037859),
(2, 0.13682853028454384),
(9, 0.12930134856644754),
(6, 0.1257747954095489)],
'name': '<NAME>'}},
{207: {'explanation': [(3, 0.21351937934930917),
(10, 0.16933456312772083),
(11, -0.13447244552856766),
(8, 0.11058919217055371),
(2, -0.06269239798368743)],
'name': '<NAME>'},
208: {'explanation': [(8, 0.05995551486884414),
(9, -0.05375302972380482),
(11, -0.051997353324246445),
(6, 0.04213181405953071),
(2, -0.039169895361928275)],
'name': '<NAME>'},
852: {'explanation': [(7, 0.31382219776986503),
(11, 0.24126214884275987),
(13, 0.21075924370226598),
(2, 0.11937652039885377),
(8, -0.11911265319329697)],
'name': '<NAME>'}},
{207: {'explanation': [(3, 0.39254403293049134),
(9, 0.19357165018747347),
(6, 0.16592079671652987),
(0, 0.14042059731407297),
(1, 0.09793027079765507)],
'name': '<NAME>'},
208: {'explanation': [(9, -0.19351859273276703),
(1, -0.15262967987262344),
(3, 0.12205127112235375),
(2, 0.11352141032313934),
(6, -0.11164209893429898)],
'name': '<NAME>'},
852: {'explanation': [(7, 0.17213007100844877),
(0, -0.1583030948868859),
(3, -0.13748574615069775),
(5, 0.13273283867075436),
(11, 0.12309551170070354)],
'name': '<NAME>'}},
{207: {'explanation': [(3, 0.4073533182995105),
(10, 0.20711667988142463),
(8, 0.15360813290032324),
(6, 0.1405424759832785),
(1, 0.1332920685413575)],
'name': '<NAME>'},
208: {'explanation': [(9, -0.14747910525112617),
(1, -0.13977061235228924),
(2, 0.10526833898161611),
(6, -0.10416022118399552),
(3, 0.09555992655161764)],
'name': '<NAME>'},
852: {'explanation': [(11, 0.2232260929107954),
(7, 0.21638443149433054),
(5, 0.21100464215582274),
(13, 0.145614853795006),
(1, -0.11416523431311262)],
'name': '<NAME>'}},
{207: {'explanation': [(1, 0.14700178977744183),
(0, 0.10346667279328238),
(2, 0.10346667279328238),
(7, 0.10346667279328238),
(8, 0.10162900633690726)],
'name': '<NAME>'},
208: {'explanation': [(10, -0.10845134816658476),
(8, -0.1026920429226184),
(6, -0.10238154733842847),
(18, 0.10094164937411244),
(16, 0.08646888450232793)],
'name': '<NAME>'},
852: {'explanation': [(18, -0.20542297091894474),
(13, 0.2012751176130666),
(8, -0.19194747162742365),
(20, 0.14686930696710473),
(15, 0.11796990086271067)],
'name': '<NAME>'}},
{207: {'explanation': [(13, 0.12446259821701779),
(17, 0.11859084421095789),
(15, 0.09690553833007137),
(12, -0.08869743701731962),
(4, 0.08124900427893789)],
'name': '<NAME>'},
208: {'explanation': [(10, -0.09478194981909983),
(20, -0.09173392507039077),
(9, 0.08768898801254493),
(17, -0.07553994244536394),
(4, 0.07422905503397653)],
'name': '<NAME>'},
852: {'explanation': [(21, 0.1327882942965061),
(1, 0.1238236573086363),
(18, -0.10911712271717902),
(19, 0.09707191051320978),
(6, 0.08593672504338913)],
'name': '<NAME>'}},
{207: {'explanation': [(6, 0.14931728779865114),
(14, 0.14092073957103526),
(1, 0.11071480021464616),
(4, 0.10655287976934531),
(8, 0.08705404649152573)],
'name': '<NAME>'},
208: {'explanation': [(8, -0.12242580400886727),
(9, 0.12142729544158742),
(14, -0.1148252787068248),
(16, -0.09562322208795092),
(4, 0.09350160975513132)],
'name': '<NAME>'},
852: {'explanation': [(6, 0.04227675072263027),
(9, -0.03107924340879173),
(14, 0.028007115650713045),
(13, 0.02771190348545554),
(19, 0.02640441416071482)],
'name': '<NAME>'}},
{207: {'explanation': [(19, 0.14313680656283245),
(18, 0.12866508562342843),
(8, 0.11809779264185447),
(0, 0.11286255403442104),
(2, 0.11286255403442104)],
'name': '<NAME>'},
208: {'explanation': [(9, 0.2397917428082761),
(14, -0.19435572812170654),
(6, -0.1760894833446507),
(18, -0.12243333818399058),
(15, 0.10986343675377105)],
'name': '<NAME>'},
852: {'explanation': [(14, 0.15378038774613365),
(9, -0.14245940635481966),
(6, 0.10213601012183973),
(20, 0.1009180838986786),
(3, 0.09780065767815548)],
'name': '<NAME>'}},
{207: {'explanation': [(15, 0.06525850448807077),
(9, 0.06286791243851698),
(19, 0.055189970374185854),
(8, 0.05499197604401475),
(13, 0.04748220842936177)],
'name': '<NAME>'},
208: {'explanation': [(6, -0.31549091899770765),
(5, 0.1862302670824446),
(8, -0.17381478451341995),
(10, -0.17353516098662508),
(14, -0.13591542421754205)],
'name': '<NAME>'},
852: {'explanation': [(14, 0.2163853942943355),
(6, 0.17565046338282214),
(1, 0.12446193028474549),
(9, -0.11365789839746396),
(10, 0.09239073691962967)],
'name': '<NAME>'}},
{207: {'explanation': [(19, 0.1141207265647932),
(36, -0.08861425922625768),
(30, 0.07219209872026074),
(9, -0.07150939547859836),
(38, -0.06988288637544438)],
'name': '<NAME>'},
208: {'explanation': [(29, 0.10531073909547647),
(13, 0.08279642208039652),
(34, -0.0817952443980797),
(33, -0.08086848205765082),
(12, 0.08086848205765082)],
'name': '<NAME>'},
852: {'explanation': [(13, -0.1330452414595897),
(4, 0.09942366413042845),
(12, -0.09881995683190645),
(33, 0.09881995683190645),
(19, -0.09596925317560831)],
'name': '<NAME>'}},
{207: {'explanation': [(37, 0.08193926967758253),
(35, 0.06804043021426347),
(15, 0.06396269230810163),
(11, 0.062255657227065296),
(8, 0.05529200233091672)],
'name': '<NAME>'},
208: {'explanation': [(19, 0.05711957286614678),
(27, -0.050230108135410824),
(16, -0.04743034616549999),
(5, -0.046717346734255705),
(9, -0.04419100026638039)],
'name': '<NAME>'},
852: {'explanation': [(3, -0.08390967998497496),
(30, -0.07037680222442452),
(22, 0.07029819368543713),
(8, -0.06861396187180349),
(37, -0.06662511956402824)],
'name': '<NAME>'}},
{207: {'explanation': [(19, 0.048418845359024805),
(9, -0.0423869575883795),
(30, 0.04012650790044438),
(36, -0.03787242980067195),
(10, 0.036557999380695635)],
'name': '<NAME>'},
208: {'explanation': [(10, 0.12120686823129677),
(17, 0.10196564232230493),
(7, 0.09495133975425854),
(25, -0.0759657891182803),
(2, -0.07035244568286837)],
'name': '<NAME>'},
852: {'explanation': [(3, -0.0770578003457272),
(28, 0.0769372258280398),
(6, -0.06044725989272927),
(22, 0.05550155775286349),
(31, -0.05399028046597057)],
'name': '<NAME>'}},
{207: {'explanation': [(14, 0.05371383110181226),
(0, -0.04442539316084218),
(18, 0.042589475382826494),
(19, 0.04227647855354252),
(17, 0.041685661662754295)],
'name': '<NAME>'},
208: {'explanation': [(29, 0.14419601354489464),
(17, 0.11785174500536676),
(36, 0.1000501679652906),
(10, 0.09679790134851017),
(35, 0.08710376081189208)],
'name': '<NAME>'},
852: {'explanation': [(8, -0.02486237985832769),
(3, -0.022559886154747102),
(11, -0.021878686669239856),
(36, 0.021847953817988534),
(19, -0.018317598300716522)],
'name': '<NAME>'}},
{207: {'explanation': [(37, 0.08098729255605368),
(35, 0.06639102704982619),
(15, 0.06033721190370432),
(34, 0.05826267856117829),
(28, 0.05549505160798173)],
'name': '<NAME>'},
208: {'explanation': [(17, 0.13839012042250542),
(10, 0.11312187488346881),
(7, 0.10729071207480922),
(25, -0.09529127965797404),
(11, -0.09279834572979286)],
'name': '<NAME>'},
852: {'explanation': [(3, -0.028385651836694076),
(22, 0.023364702783498722),
(8, -0.023097812578270233),
(30, -0.022931236620034406),
(37, -0.022040170736525342)],
'name': '<NAME>'}}
]
EXP_TAB = {
'setosa&0&0': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&1': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&2': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&3': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&4': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&5': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&6': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&7': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&8': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&9': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&10': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&11': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&12': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&13': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&14': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&15': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&16': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&17': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&18': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&19': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&20': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&21': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&22': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&23': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&24': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&25': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&26': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&27': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&28': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&29': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&30': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&31': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&32': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&33': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&34': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&35': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&36': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&37': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&38': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&39': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&40': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&41': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&42': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&43': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&44': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&45': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&46': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&47': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&48': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&49': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&50': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&51': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&52': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&53': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&54': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&55': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&56': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&57': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&58': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&59': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&60': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&61': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&62': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&63': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&64': np.array([0.3094460464703627, 0.11400643817329122]),
'setosa&0&65': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&66': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&67': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&68': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&69': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&70': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&71': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&72': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&73': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&74': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&75': np.array([0.0, 0.95124502153736]),
'setosa&0&76': np.array([0.0, 0.9708703761803881]),
'setosa&0&77': np.array([0.0, 0.5659706098422994]),
'setosa&0&78': np.array([0.0, 0.3962828716108186]),
'setosa&0&79': np.array([0.0, 0.2538069363248767]),
'setosa&0&80': np.array([0.0, 0.95124502153736]),
'setosa&0&81': np.array([0.0, 0.95124502153736]),
'setosa&0&82': np.array([0.0, 0.95124502153736]),
'setosa&0&83': np.array([0.0, 0.95124502153736]),
'setosa&0&84': np.array([0.0, 0.9708703761803881]),
'setosa&0&85': np.array([0.0, 0.9708703761803881]),
'setosa&0&86': np.array([0.0, 0.9708703761803881]),
'setosa&0&87': np.array([0.0, 0.5659706098422994]),
'setosa&0&88': np.array([0.0, 0.5659706098422994]),
'setosa&0&89': np.array([0.0, 0.3962828716108186]),
'setosa&0&90': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&91': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&92': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&93': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&94': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&95': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&96': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&97': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&98': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&99': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&100': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&101': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&102': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&103': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&104': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&105': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&106': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&107': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&108': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&109': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&110': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&111': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&112': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&113': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&114': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&115': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&116': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&117': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&118': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&119': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&120': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&121': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&122': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&123': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&124': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&125': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&126': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&127': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&128': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&129': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&130': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&131': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&132': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&133': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&134': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&135': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&136': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&137': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&138': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&139': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&140': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&141': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&142': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&143': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&144': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&145': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&146': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&147': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&148': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&149': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&150': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&151': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&152': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&153': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&154': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&155': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&156': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&157': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&158': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&159': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&160': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&161': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&162': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&163': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&164': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&165': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&166': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&167': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&168': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&169': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&170': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&171': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&172': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&173': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&174': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&175': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&176': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&177': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&178': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&179': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&180': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&181': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&182': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&183': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&184': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&185': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&186': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&187': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&188': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&189': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&190': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&191': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&192': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&193': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&194': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&195': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&196': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&197': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&198': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&199': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&200': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&201': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&202': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&203': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&204': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&205': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&206': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&207': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&208': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&209': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&210': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&211': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&212': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&213': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&214': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&215': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&216': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&217': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&218': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&219': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&220': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&221': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&222': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&223': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&224': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&225': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&226': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&227': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&228': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&229': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&230': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&231': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&232': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&233': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&234': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&235': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&236': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&237': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&238': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&239': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&240': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&241': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&242': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&243': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&244': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&245': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&246': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&247': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&248': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&249': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&250': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&251': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&252': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&253': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&254': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&255': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&256': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&257': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&258': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&259': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&260': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&261': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&262': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&263': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&264': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&265': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&266': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&267': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&268': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&269': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&270': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&271': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&272': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&273': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&274': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&275': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&276': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&277': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&278': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&279': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&280': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&281': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&282': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&283': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&284': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&285': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&286': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&287': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&288': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&289': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&290': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&291': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&292': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&293': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&294': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&295': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&296': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&297': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&298': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&299': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&300': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&301': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&302': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&303': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&304': np.array([0.3094460464703627, 0.11400643817329122]),
'setosa&0&305': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&306': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&307': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&308': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&309': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&310': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&311': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&312': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&313': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&314': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&1&0': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&1': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&2': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&3': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&4': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&5': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&6': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&7': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&8': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&9': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&10': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&11': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&12': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&13': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&14': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&15': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&16': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&17': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&18': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&19': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&20': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&21': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&22': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&23': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&24': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&25': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&26': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&27': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&28': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&29': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&30': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&31': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&32': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&33': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&34': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&35': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&36': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&37': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&38': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&39': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&40': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&41': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&42': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&43': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&44': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&45': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&46': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&47': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&48': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&49': np.array([0.4079256832347186, 0.038455640985860955]),
'setosa&1&50': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&51': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&52': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&53': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&54': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&55': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&56': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&57': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&58': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&59': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&60': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&61': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&62': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&63': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&1&64': np.array([0.3093950298647913, 0.1140298206733954]),
'setosa&1&65': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&66': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&67': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&68': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&69': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&70': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&71': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&72': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&73': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&74': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&1&75': np.array([0.0, -0.4756207622944677]),
'setosa&1&76': np.array([0.0, -0.4854334805210761]),
'setosa&1&77': np.array([0.0, 0.16885577975809635]),
'setosa&1&78': np.array([0.0, 0.395805885538554]),
'setosa&1&79': np.array([0.0, 0.2538072707138344]),
'setosa&1&80': np.array([0.0, -0.4756207622944677]),
'setosa&1&81': np.array([0.0, -0.4756207622944677]),
'setosa&1&82': np.array([0.0, -0.4756207622944677]),
'setosa&1&83': np.array([0.0, -0.4756207622944677]),
'setosa&1&84': np.array([0.0, -0.4854334805210761]),
'setosa&1&85': np.array([0.0, -0.4854334805210761]),
'setosa&1&86': np.array([0.0, -0.4854334805210761]),
'setosa&1&87': np.array([0.0, 0.16885577975809635]),
'setosa&1&88': np.array([0.0, 0.16885577975809635]),
'setosa&1&89': np.array([0.0, 0.395805885538554]),
'setosa&1&90': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&91': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&92': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&93': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&94': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&95': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&96': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&97': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&98': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&99': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&100': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&101': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&102': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&103': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&104': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&105': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&106': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&107': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&108': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&109': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&110': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&111': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&112': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&113': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&114': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&115': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&116': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&117': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&118': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&119': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&120': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&121': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&122': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&123': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&124': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&125': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&126': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&127': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&128': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&129': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&130': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&131': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&132': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&133': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&134': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&135': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&136': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&137': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&138': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&139': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&140': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&141': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&142': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&143': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&144': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&145': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&146': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&147': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&148': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&149': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&150': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&151': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&152': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&153': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&154': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&155': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&156': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&157': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&158': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&159': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&160': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&161': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&162': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&163': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&164': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&165': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&166': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&167': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&168': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&169': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&170': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&171': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&172': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&173': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&174': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&175': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&176': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&177': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&178': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&179': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&180': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&181': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&182': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&183': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&184': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&185': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&186': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&187': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&188': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&189': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&190': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&191': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&192': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&193': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&194': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&195': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&196': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&197': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&198': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&199': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&200': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&201': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&202': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&203': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&204': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&205': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&206': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&207': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&208': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&209': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&210': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&211': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&212': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&213': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&214': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&215': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&216': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&217': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&218': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&219': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&220': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&221': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&222': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&223': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&224': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&225': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&226': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&227': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&228': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&229': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&230': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&231': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&232': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&233': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&234': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&235': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&236': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&237': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&238': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&239': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&240': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&241': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&242': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&243': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&244': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&245': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&246': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&247': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&248': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&249': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&250': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&251': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&252': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&253': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&254': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&255': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&256': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&257': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&258': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&259': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&260': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&261': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&262': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&263': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&264': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&265': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&266': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&267': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&268': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&269': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&270': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&271': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&272': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&273': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&274': np.array([0.4079256832347186, 0.038455640985860955]),
'setosa&1&275': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&276': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&277': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&278': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&279': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&280': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&281': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&282': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&283': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&284': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&285': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&286': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&287': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&288': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&289': np.array([0.4079256832347186, 0.038455640985860955]),
'setosa&1&290': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&291': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&292': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&293': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&294': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&295': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&296': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&297': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&298': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&299': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&300': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&301': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&302': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&303': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&1&304': np.array([0.3093950298647913, 0.1140298206733954]),
'setosa&1&305': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&306': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&307': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&308': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&309': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&310': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&311': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&312': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&313': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&314': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&2&0': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&1': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&2': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&3': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&4': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&5': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&6': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&7': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&8': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&9': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&10': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&11': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&12': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&13': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&14': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&15': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&16': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&17': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&18': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&19': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&20': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&21': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&22': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&23': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&24': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&25': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&26': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&27': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&28': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&29': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&30': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&31': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&32': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&33': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&34': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&35': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&36': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&37': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&38': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&39': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&40': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&41': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&42': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&43': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&44': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&45': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&46': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&47': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&48': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&49': np.array([-0.8735738195653328, -0.046438180466149094]),
'setosa&2&50': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&51': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&52': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&53': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&54': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&55': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&56': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&57': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&58': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&59': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&60': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&61': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&62': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&63': np.array([-0.2741128763380603, -0.7260889090887469]),
'setosa&2&64': np.array([-0.6188410763351541, -0.22803625884668638]),
'setosa&2&65': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&66': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&67': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&68': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&69': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&70': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&71': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&72': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&73': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&74': np.array([-0.2741128763380603, -0.7260889090887469]),
'setosa&2&75': np.array([0.0, -0.47562425924289314]),
'setosa&2&76': np.array([0.0, -0.48543689565931186]),
'setosa&2&77': np.array([0.0, -0.7348263896003956]),
'setosa&2&78': np.array([0.0, -0.7920887571493729]),
'setosa&2&79': np.array([0.0, -0.507614207038711]),
'setosa&2&80': np.array([0.0, -0.47562425924289314]),
'setosa&2&81': np.array([0.0, -0.47562425924289314]),
'setosa&2&82': np.array([0.0, -0.47562425924289314]),
'setosa&2&83': np.array([0.0, -0.47562425924289314]),
'setosa&2&84': np.array([0.0, -0.48543689565931186]),
'setosa&2&85': np.array([0.0, -0.48543689565931186]),
'setosa&2&86': np.array([0.0, -0.48543689565931186]),
'setosa&2&87': np.array([0.0, -0.7348263896003956]),
'setosa&2&88': np.array([0.0, -0.7348263896003956]),
'setosa&2&89': np.array([0.0, -0.7920887571493729]),
'setosa&2&90': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&91': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&92': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&93': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&94': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&95': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&96': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&97': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&98': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&99': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&100': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&101': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&102': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&103': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&104': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&105': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&106': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&107': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&108': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&109': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&110': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&111': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&112': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&113': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&114': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&115': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&116': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&117': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&118': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&119': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&120': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&121': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&122': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&123': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&124': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&125': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&126': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&127': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&128': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&129': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&130': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&131': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&132': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&133': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&134': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&135': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&136': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&137': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&138': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&139': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&140': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&141': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&142': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&143': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&144': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&145': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&146': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&147': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&148': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&149': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&150': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&151': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&152': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&153': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&154': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&155': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&156': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&157': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&158': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&159': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&160': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&161': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&162': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&163': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&164': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&165': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&166': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&167': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&168': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&169': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&170': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&171': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&172': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&173': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&174': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&175': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&176': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&177': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&178': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&179': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&180': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&181': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&182': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&183': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&184': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&185': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&186': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&187': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&188': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&189': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&190': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&191': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&192': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&193': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&194': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&195': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&196': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&197': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&198': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&199': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&200': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&201': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&202': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&203': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&204': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&205': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&206': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&207': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&208': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&209': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&210': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&211': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&212': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&213': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&214': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&215': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&216': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&217': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&218': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&219': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&220': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&221': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&222': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&223': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&224': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&225': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&226': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&227': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&228': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&229': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&230': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&231': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&232': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&233': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&234': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&235': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&236': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&237': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&238': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&239': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&240': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&241': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&242': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&243': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&244': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&245': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&246': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&247': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&248': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&249': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&250': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&251': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&252': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&253': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&254': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&255': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&256': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&257': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&258': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&259': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&260': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&261': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&262': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&263': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&264': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&265': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&266': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&267': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&268': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&269': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&270': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&271': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&272': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&273': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&274': np.array([-0.8735738195653328, -0.046438180466149094]),
'setosa&2&275': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&276': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&277': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&278': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&279': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&280': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&281': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&282': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&283': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&284': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&285': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&286': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&287': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&288': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&289': np.array([-0.8735738195653328, -0.046438180466149094]),
'setosa&2&290': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&291': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&292': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&293': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&294': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&295': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&296': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&297': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&298': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&299': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&300': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&301': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&302': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&303': np.array([-0.2741128763380603, -0.7260889090887469]),
'setosa&2&304': np.array([-0.6188410763351541, -0.22803625884668638]),
'setosa&2&305': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&306': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&307': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&308': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&309': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&310': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&311': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&312': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&313': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&314': np.array([-0.2741128763380603, -0.7260889090887469]),
'versicolor&0&0': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&1': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&2': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&3': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&4': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&5': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&6': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&7': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&8': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&9': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&10': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&11': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&12': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&13': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&14': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&15': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&16': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&17': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&18': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&19': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&20': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&21': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&22': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&23': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&24': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&25': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&26': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&27': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&28': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&29': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&30': np.array([-0.19685199412911655, -0.7845879230594393]),
'versicolor&0&31': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&32': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&33': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&34': np.array([-0.7974072911132788, 0.006894018772033604]),
'versicolor&0&35': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&36': np.array([-0.7779663027946229, -0.2981599980028888]),
'versicolor&0&37': np.array([-0.6669876551417979, -0.2911996622134135]),
'versicolor&0&38': np.array([-0.3355030348883163, -0.6305271339971502]),
'versicolor&0&39': np.array([-0.7658431164447598, -0.3248317507526541]),
'versicolor&0&40': np.array([-0.6459073168288453, -0.31573292128613833]),
'versicolor&0&41': np.array([-0.2519677855687844, -0.7134447168661863]),
'versicolor&0&42': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&43': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&44': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&45': np.array([0.05031696218434577, -0.929227611211748]),
'versicolor&0&46': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&47': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&48': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&49': np.array([0.4656481363306145, 0.007982539480288167]),
'versicolor&0&50': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&51': np.array([0.6614632074748169, -0.6030419328583525]),
'versicolor&0&52': np.array([0.5519595359123358, -0.6434192906054143]),
'versicolor&0&53': np.array([0.14241819268815753, -0.8424615476000691]),
'versicolor&0&54': np.array([0.667423576348749, -0.6594086777766442]),
'versicolor&0&55': np.array([0.5429872243487625, -0.6697888833280774]),
'versicolor&0&56': np.array([0.1140907502997574, -0.8737800276630269]),
'versicolor&0&57': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&58': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&59': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&60': np.array([0.029402442458921384, -0.9481684282717414]),
'versicolor&0&61': np.array([0.009887859354111524, -0.9698143912008228]),
'versicolor&0&62': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&63': np.array([0.13694026920485936, 0.36331091829858003]),
'versicolor&0&64': np.array([0.3094460464703627, 0.11400643817329122]),
'versicolor&0&65': np.array([0.009887859354111524, -0.9698143912008228]),
'versicolor&0&66': np.array([0.42809266524335826, -0.40375108595117376]),
'versicolor&0&67': np.array([0.45547700380103057, -0.6083463409799501]),
'versicolor&0&68': np.array([0.19002455311770447, -0.8848597943731074]),
'versicolor&0&69': np.array([0.436966114193701, -0.4638042290788281]),
'versicolor&0&70': np.array([0.45424510803217066, -0.6425314361631614]),
'versicolor&0&71': np.array([0.1746467870122951, -0.9073062742839755]),
'versicolor&0&72': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&73': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&74': np.array([0.13694026920485936, 0.36331091829858003]),
'versicolor&0&75': np.array([0.0, -0.95124502153736]),
'versicolor&0&76': np.array([0.0, -0.9708703761803881]),
'versicolor&0&77': np.array([0.0, 0.5659706098422994]),
'versicolor&0&78': np.array([0.0, 0.3962828716108186]),
'versicolor&0&79': np.array([0.0, 0.2538069363248767]),
'versicolor&0&80': np.array([0.0, -0.9708703761803881]),
'versicolor&0&81': np.array([0.0, -0.3631376646911367]),
'versicolor&0&82': np.array([0.0, -0.5804857652839247]),
'versicolor&0&83': np.array([0.0, -0.8943993997517804]),
'versicolor&0&84': np.array([0.0, -0.4231275527222919]),
'versicolor&0&85': np.array([0.0, -0.6164235822373675]),
'versicolor&0&86': np.array([0.0, -0.9166476163222441]),
'versicolor&0&87': np.array([0.0, 0.5659706098422994]),
'versicolor&0&88': np.array([0.0, 0.5659706098422994]),
'versicolor&0&89': np.array([0.0, 0.3962828716108186]),
'versicolor&0&90': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&91': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&92': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&93': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&94': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&95': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&96': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&97': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&98': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&99': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&100': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&101': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&102': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&103': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&104': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&105': np.array([-0.19685199412911655, -0.7845879230594393]),
'versicolor&0&106': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&107': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&108': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&109': np.array([-0.7974072911132788, 0.006894018772033604]),
'versicolor&0&110': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&111': np.array([-0.7779663027946229, -0.2981599980028888]),
'versicolor&0&112': np.array([-0.6669876551417979, -0.2911996622134135]),
'versicolor&0&113': np.array([-0.3355030348883163, -0.6305271339971502]),
'versicolor&0&114': np.array([-0.7658431164447598, -0.3248317507526541]),
'versicolor&0&115': np.array([-0.6459073168288453, -0.31573292128613833]),
'versicolor&0&116': np.array([-0.2519677855687844, -0.7134447168661863]),
'versicolor&0&117': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&118': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&119': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&120': np.array([-0.05855179950109871, -0.9211684729232403]),
'versicolor&0&121': np.array([-0.020067537725011863, -0.960349531159508]),
'versicolor&0&122': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&123': np.array([-0.6813845327458135, 0.6599725404733693]),
'versicolor&0&124': np.array([-0.5182062652425321, 0.3958533237517639]),
'versicolor&0&125': np.array([-0.020067537725011863, -0.960349531159508]),
'versicolor&0&126': np.array([-0.5107107533700952, 0.0075507123577884866]),
'versicolor&0&127': np.array([-0.1464063320531759, -0.4788055402156298]),
'versicolor&0&128': np.array([-0.061109248092233844, -0.8620287767000373]),
'versicolor&0&129': np.array([-0.4706137753079746, -0.057389625790424635]),
'versicolor&0&130': np.array([-0.06804620923037683, -0.5677904519730453]),
'versicolor&0&131': np.array([-0.020216773196675246, -0.9057119888626176]),
'versicolor&0&132': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&133': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&134': np.array([-0.6813845327458135, 0.6599725404733693]),
'versicolor&0&135': np.array([-0.19684482070614498, -0.7845939961595055]),
'versicolor&0&136': np.array([-0.07475231751447156, -0.9062785678426409]),
'versicolor&0&137': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&138': np.array([-0.7694171988675237, 0.276633135028249]),
'versicolor&0&139': np.array([-0.8063011502229427, 0.4134300066735808]),
'versicolor&0&140': np.array([-0.07475231751447156, -0.9062785678426409]),
'versicolor&0&141': np.array([-0.7985789197998611, 0.0026209054759345337]),
'versicolor&0&142': np.array([-0.7182275903095532, -0.11963032135457498]),
'versicolor&0&143': np.array([-0.2798927835773098, -0.6581136857450849]),
'versicolor&0&144': np.array([-0.7920119433269182, -0.0142751249964083]),
'versicolor&0&145': np.array([-0.6943081428778407, -0.14852813120265815]),
'versicolor&0&146': np.array([-0.16106555563262584, -0.777621649099753]),
'versicolor&0&147': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&148': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&149': np.array([-0.7694171988675237, 0.276633135028249]),
'versicolor&0&150': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&151': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&152': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&153': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&154': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&155': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&156': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&157': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&158': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&159': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&160': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&161': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&162': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&163': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&164': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&165': np.array([-0.19685199412911655, -0.7845879230594393]),
'versicolor&0&166': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&167': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&168': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&169': np.array([-0.7974072911132788, 0.006894018772033604]),
'versicolor&0&170': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&171': np.array([-0.7779663027946229, -0.2981599980028888]),
'versicolor&0&172': np.array([-0.6669876551417979, -0.2911996622134135]),
'versicolor&0&173': np.array([-0.3355030348883163, -0.6305271339971502]),
'versicolor&0&174': np.array([-0.7658431164447598, -0.3248317507526541]),
'versicolor&0&175': np.array([-0.6459073168288453, -0.31573292128613833]),
'versicolor&0&176': np.array([-0.2519677855687844, -0.7134447168661863]),
'versicolor&0&177': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&178': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&179': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&180': np.array([-0.05855179950109871, -0.9211684729232403]),
'versicolor&0&181': np.array([-0.020067537725011863, -0.960349531159508]),
'versicolor&0&182': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&183': np.array([-0.6813845327458135, 0.6599725404733693]),
'versicolor&0&184': np.array([-0.5182062652425321, 0.3958533237517639]),
'versicolor&0&185': np.array([-0.020067537725011863, -0.960349531159508]),
'versicolor&0&186': np.array([-0.5107107533700952, 0.0075507123577884866]),
'versicolor&0&187': np.array([-0.1464063320531759, -0.4788055402156298]),
'versicolor&0&188': np.array([-0.061109248092233844, -0.8620287767000373]),
'versicolor&0&189': np.array([-0.4706137753079746, -0.057389625790424635]),
'versicolor&0&190': np.array([-0.06804620923037683, -0.5677904519730453]),
'versicolor&0&191': np.array([-0.020216773196675246, -0.9057119888626176]),
'versicolor&0&192': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&193': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&194': np.array([-0.6813845327458135, 0.6599725404733693]),
'versicolor&0&195': np.array([-0.19684482070614498, -0.7845939961595055]),
'versicolor&0&196': np.array([-0.07475231751447156, -0.9062785678426409]),
'versicolor&0&197': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&198': np.array([-0.7694171988675237, 0.276633135028249]),
'versicolor&0&199': np.array([-0.8063011502229427, 0.4134300066735808]),
'versicolor&0&200': np.array([-0.07475231751447156, -0.9062785678426409]),
'versicolor&0&201': np.array([-0.7985789197998611, 0.0026209054759345337]),
'versicolor&0&202': np.array([-0.7182275903095532, -0.11963032135457498]),
'versicolor&0&203': np.array([-0.2798927835773098, -0.6581136857450849]),
'versicolor&0&204': np.array([-0.7920119433269182, -0.0142751249964083]),
'versicolor&0&205': np.array([-0.6943081428778407, -0.14852813120265815]),
'versicolor&0&206': np.array([-0.16106555563262584, -0.777621649099753]),
'versicolor&0&207': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&208': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&209': np.array([-0.7694171988675237, 0.276633135028249]),
'versicolor&0&210': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&211': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&212': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&213': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&214': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&215': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&216': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&217': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&218': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&219': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&220': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&221': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&222': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&223': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&224': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&225': np.array([-0.04777085826693217, -0.931704979630315]),
'versicolor&0&226': np.array([-0.016252316132452975, -0.9640854286687816]),
'versicolor&0&227': np.array([-0.44101924439572626, 0.5583264842761904]),
'versicolor&0&228': np.array([-0.5844994389588399, 0.5715208832363579]),
'versicolor&0&229': np.array([-0.46216647196120714, 0.35468591243823655]),
'versicolor&0&230': np.array([-0.016252316132452975, -0.9640854286687816]),
'versicolor&0&231': np.array([-0.3707180757031537, -0.1977196581472426]),
'versicolor&0&232': np.array([-0.1043459833293615, -0.5233314327065356]),
'versicolor&0&233': np.array([-0.049289647556763364, -0.8736084405111605]),
'versicolor&0&234': np.array([-0.34078174031874375, -0.25874482325965437]),
'versicolor&0&235': np.array([-0.050841051273783675, -0.5877587283589205]),
'versicolor&0&236': np.array([-0.0161720977425142, -0.9096817855236822]),
'versicolor&0&237': np.array([-0.44101924439572626, 0.5583264842761904]),
'versicolor&0&238': np.array([-0.44101924439572626, 0.5583264842761904]),
'versicolor&0&239': np.array([-0.5844994389588399, 0.5715208832363579]),
'versicolor&0&240': np.array([-0.11329659732608087, -0.8671819100849522]),
'versicolor&0&241': np.array([-0.040390637135858574, -0.9402832917474078]),
'versicolor&0&242': np.array([-0.5276460255602035, 0.28992233541586077]),
'versicolor&0&243': np.array([-0.6392402874163683, 0.24114611970435948]),
'versicolor&0&244': np.array([-0.6814868825686854, 0.35066801608083215]),
'versicolor&0&245': np.array([-0.040390637135858574, -0.9402832917474078]),
'versicolor&0&246': np.array([-0.6425009695928476, -0.24851992476830956]),
'versicolor&0&247': np.array([-0.5151243662384031, -0.3255567772442641]),
'versicolor&0&248': np.array([-0.16157511199607094, -0.7754323813403634]),
'versicolor&0&249': np.array([-0.6300442788906601, -0.28361140069713875]),
'versicolor&0&250': np.array([-0.4875864856121089, -0.3614122096616301]),
'versicolor&0&251': np.array([-0.08968204532514226, -0.8491191210330045]),
'versicolor&0&252': np.array([-0.5276460255602035, 0.28992233541586077]),
'versicolor&0&253': np.array([-0.5276460255602035, 0.28992233541586077]),
'versicolor&0&254': np.array([-0.6392402874163683, 0.24114611970435948]),
'versicolor&0&255': np.array([-0.19685199412911655, -0.7845879230594393]),
'versicolor&0&256': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&257': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&258': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&259': np.array([-0.7974072911132788, 0.006894018772033604]),
'versicolor&0&260': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&261': np.array([-0.7779663027946229, -0.2981599980028888]),
'versicolor&0&262': np.array([-0.6669876551417979, -0.2911996622134135]),
'versicolor&0&263': np.array([-0.3355030348883163, -0.6305271339971502]),
'versicolor&0&264': np.array([-0.7658431164447598, -0.3248317507526541]),
'versicolor&0&265': np.array([-0.6459073168288453, -0.31573292128613833]),
'versicolor&0&266': np.array([-0.2519677855687844, -0.7134447168661863]),
'versicolor&0&267': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&268': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&269': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&270': np.array([0.05031696218434577, -0.929227611211748]),
'versicolor&0&271': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&272': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&273': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&274': np.array([0.4656481363306145, 0.007982539480288167]),
'versicolor&0&275': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&276': np.array([0.6614632074748169, -0.6030419328583525]),
'versicolor&0&277': np.array([0.5519595359123358, -0.6434192906054143]),
'versicolor&0&278': np.array([0.14241819268815753, -0.8424615476000691]),
'versicolor&0&279': np.array([0.667423576348749, -0.6594086777766442]),
'versicolor&0&280': np.array([0.5429872243487625, -0.6697888833280774]),
'versicolor&0&281': np.array([0.1140907502997574, -0.8737800276630269]),
'versicolor&0&282': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&283': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&284': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&285': np.array([0.05031696218434577, -0.929227611211748]),
'versicolor&0&286': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&287': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&288': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&289': np.array([0.4656481363306145, 0.007982539480288167]),
'versicolor&0&290': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&291': np.array([0.6614632074748169, -0.6030419328583525]),
'versicolor&0&292': np.array([0.5519595359123358, -0.6434192906054143]),
'versicolor&0&293': np.array([0.14241819268815753, -0.8424615476000691]),
'versicolor&0&294': np.array([0.667423576348749, -0.6594086777766442]),
'versicolor&0&295': np.array([0.5429872243487625, -0.6697888833280774]),
'versicolor&0&296': np.array([0.1140907502997574, -0.8737800276630269]),
'versicolor&0&297': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&298': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&299': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&300': np.array([0.029402442458921384, -0.9481684282717414]),
'versicolor&0&301': np.array([0.009887859354111524, -0.9698143912008228]),
'versicolor&0&302': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&303': np.array([0.13694026920485936, 0.36331091829858003]),
'versicolor&0&304': np.array([0.3094460464703627, 0.11400643817329122]),
'versicolor&0&305': np.array([0.009887859354111524, -0.9698143912008228]),
'versicolor&0&306': np.array([0.42809266524335826, -0.40375108595117376]),
'versicolor&0&307': np.array([0.45547700380103057, -0.6083463409799501]),
'versicolor&0&308': np.array([0.19002455311770447, -0.8848597943731074]),
'versicolor&0&309': np.array([0.436966114193701, -0.4638042290788281]),
'versicolor&0&310': np.array([0.45424510803217066, -0.6425314361631614]),
'versicolor&0&311': np.array([0.1746467870122951, -0.9073062742839755]),
'versicolor&0&312': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&313': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&314': np.array([0.13694026920485936, 0.36331091829858003]),
'versicolor&1&0': np.array([0.37157553889555184, 0.1221600832023858]),
'versicolor&1&1': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&2': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&3': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&4': np.array([0.4964962439921071, 0.3798215458387346]),
'versicolor&1&5': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&6': np.array([0.2805345936193346, 0.6595182922149835]),
'versicolor&1&7': np.array([0.08302493125394889, 0.6186280682763334]),
'versicolor&1&8': np.array([0.22125635302655813, 0.2925832702358638]),
'versicolor&1&9': np.array([0.2365788606456636, 0.7120007179768731]),
'versicolor&1&10': np.array([0.022347126801293967, 0.6718013300441928]),
'versicolor&1&11': np.array([0.10063786451829529, 0.4085974066833644]),
'versicolor&1&12': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&13': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&14': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&15': np.array([0.37157553889555184, 0.1221600832023858]),
'versicolor&1&16': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&17': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&18': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&19': np.array([0.4964962439921071, 0.3798215458387346]),
'versicolor&1&20': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&21': np.array([0.2805345936193346, 0.6595182922149835]),
'versicolor&1&22': np.array([0.08302493125394889, 0.6186280682763334]),
'versicolor&1&23': np.array([0.22125635302655813, 0.2925832702358638]),
'versicolor&1&24': np.array([0.2365788606456636, 0.7120007179768731]),
'versicolor&1&25': np.array([0.022347126801293967, 0.6718013300441928]),
'versicolor&1&26': np.array([0.10063786451829529, 0.4085974066833644]),
'versicolor&1&27': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&28': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&29': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&30': np.array([-0.32199975656257646, 0.7482293552463756]),
'versicolor&1&31': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&32': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&33': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&34': np.array([0.2619265016777598, 0.33491141590339474]),
'versicolor&1&35': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&36': np.array([0.20183015430619713, 0.7445346002055082]),
'versicolor&1&37': np.array([-0.05987874887638573, 0.6927937290176818]),
'versicolor&1&38': np.array([-0.2562642052727569, 0.6920266972283227]),
'versicolor&1&39': np.array([0.1736438124560164, 0.7898174616442941]),
'versicolor&1&40': np.array([-0.10114089899940126, 0.7326610366533243]),
'versicolor&1&41': np.array([-0.34479806250338163, 0.7789143553916729]),
'versicolor&1&42': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&43': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&44': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&45': np.array([0.7749499208750119, 0.8147189440804429]),
'versicolor&1&46': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&47': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&48': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&49': np.array([0.4079256832347186, 0.038455640985860955]),
'versicolor&1&50': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&51': np.array([0.18555813792691386, 0.6940923833143309]),
'versicolor&1&52': np.array([0.32639262064172164, 0.6296083447134281]),
'versicolor&1&53': np.array([0.6964303997553315, 0.7444536452136676]),
'versicolor&1&54': np.array([0.18216358701833335, 0.747615101407194]),
'versicolor&1&55': np.array([0.33549445287370383, 0.6526039763053625]),
'versicolor&1&56': np.array([0.7213651642695392, 0.7718874443854203]),
'versicolor&1&57': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&58': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&59': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&60': np.array([0.4933316375690332, 0.5272416708629276]),
'versicolor&1&61': np.array([0.5041830043657418, 0.5392782673950876]),
'versicolor&1&62': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&63': np.array([0.13717260713320106, 0.3627779907901665]),
'versicolor&1&64': np.array([0.3093950298647913, 0.1140298206733954]),
'versicolor&1&65': np.array([0.5041830043657418, 0.5392782673950876]),
'versicolor&1&66': np.array([0.1413116283690917, 0.7479856297394165]),
'versicolor&1&67': np.array([0.189773257421942, 0.6552150653012478]),
'versicolor&1&68': np.array([0.40694846236352233, 0.5109051764198169]),
'versicolor&1&69': np.array([0.1390424906594644, 0.7991613016301518]),
'versicolor&1&70': np.array([0.1945777487290197, 0.6743932844312892]),
'versicolor&1&71': np.array([0.415695226122737, 0.5230815102377903]),
'versicolor&1&72': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&73': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&74': np.array([0.13717260713320106, 0.3627779907901665]),
'versicolor&1&75': np.array([0.0, 0.4756207622944677]),
'versicolor&1&76': np.array([0.0, 0.4854334805210761]),
'versicolor&1&77': np.array([0.0, 0.16885577975809635]),
'versicolor&1&78': np.array([0.0, 0.395805885538554]),
'versicolor&1&79': np.array([0.0, 0.2538072707138344]),
'versicolor&1&80': np.array([0.0, 0.4854334805210761]),
'versicolor&1&81': np.array([0.0, 0.7613919530844643]),
'versicolor&1&82': np.array([0.0, 0.6668230985485095]),
'versicolor&1&83': np.array([0.0, 0.4904755652105692]),
'versicolor&1&84': np.array([0.0, 0.8121046082359693]),
'versicolor&1&85': np.array([0.0, 0.6855766903749089]),
'versicolor&1&86': np.array([0.0, 0.5008471974438506]),
'versicolor&1&87': np.array([0.0, 0.16885577975809635]),
'versicolor&1&88': np.array([0.0, 0.16885577975809635]),
'versicolor&1&89': np.array([0.0, 0.395805885538554]),
'versicolor&1&90': np.array([0.37157553889555184, 0.1221600832023858]),
'versicolor&1&91': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&92': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&93': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&94': np.array([0.4964962439921071, 0.3798215458387346]),
'versicolor&1&95': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&96': np.array([0.2805345936193346, 0.6595182922149835]),
'versicolor&1&97': np.array([0.08302493125394889, 0.6186280682763334]),
'versicolor&1&98': np.array([0.22125635302655813, 0.2925832702358638]),
'versicolor&1&99': np.array([0.2365788606456636, 0.7120007179768731]),
'versicolor&1&100': np.array([0.022347126801293967, 0.6718013300441928]),
'versicolor&1&101': np.array([0.10063786451829529, 0.4085974066833644]),
'versicolor&1&102': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&103': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&104': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&105': np.array([-0.32199975656257646, 0.7482293552463756]),
'versicolor&1&106': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&107': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&108': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&109': np.array([0.2619265016777598, 0.33491141590339474]),
'versicolor&1&110': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&111': np.array([0.20183015430619713, 0.7445346002055082]),
'versicolor&1&112': np.array([-0.05987874887638573, 0.6927937290176818]),
'versicolor&1&113': np.array([-0.2562642052727569, 0.6920266972283227]),
'versicolor&1&114': np.array([0.1736438124560164, 0.7898174616442941]),
'versicolor&1&115': np.array([-0.10114089899940126, 0.7326610366533243]),
'versicolor&1&116': np.array([-0.34479806250338163, 0.7789143553916729]),
'versicolor&1&117': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&118': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&119': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&120': np.array([0.8224435822504677, 0.05315271528828394]),
'versicolor&1&121': np.array([0.820222886307464, 0.055413714884152906]),
'versicolor&1&122': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&123': np.array([0.8282924295054531, 0.0752641855714259]),
'versicolor&1&124': np.array([0.8476206690613984, 0.02146454924522743]),
'versicolor&1&125': np.array([0.820222886307464, 0.055413714884152906]),
'versicolor&1&126': np.array([0.69362517791403, 0.2579390890424607]),
'versicolor&1&127': np.array([0.7261791877801502, 0.16248655642013624]),
'versicolor&1&128': np.array([0.8190416077589757, 0.05661509439536992]),
'versicolor&1&129': np.array([0.6654762076749751, 0.2949291633432878]),
'versicolor&1&130': np.array([0.7118161070185614, 0.17683644094125878]),
'versicolor&1&131': np.array([0.8165214253946836, 0.059175619390630096]),
'versicolor&1&132': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&133': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&134': np.array([0.8282924295054531, 0.0752641855714259]),
'versicolor&1&135': np.array([0.5188109114552927, 0.03638964581864269]),
'versicolor&1&136': np.array([0.5131478569192371, 0.04203387599862816]),
'versicolor&1&137': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&138': np.array([0.5965042032375719, 0.48856644624972617]),
'versicolor&1&139': np.array([0.5436097000280874, 0.1461891067488832]),
'versicolor&1&140': np.array([0.5131478569192371, 0.04203387599862816]),
'versicolor&1&141': np.array([0.32513442685780247, 0.6124765483184536]),
'versicolor&1&142': np.array([0.1812883360919208, 0.5504982486874137]),
'versicolor&1&143': np.array([0.4788153032824012, 0.08625929936974323]),
'versicolor&1&144': np.array([0.28490718210609345, 0.6650298146522879]),
'versicolor&1&145': np.array([0.1313204067730033, 0.597079642504441]),
'versicolor&1&146': np.array([0.46583127837967303, 0.09875847161509169]),
'versicolor&1&147': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&148': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&149': np.array([0.5965042032375719, 0.48856644624972617]),
'versicolor&1&150': np.array([0.37157553889555184, 0.1221600832023858]),
'versicolor&1&151': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&152': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&153': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&154': np.array([0.4964962439921071, 0.3798215458387346]),
'versicolor&1&155': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&156': np.array([0.2805345936193346, 0.6595182922149835]),
'versicolor&1&157': np.array([0.08302493125394889, 0.6186280682763334]),
'versicolor&1&158': np.array([0.22125635302655813, 0.2925832702358638]),
'versicolor&1&159': np.array([0.2365788606456636, 0.7120007179768731]),
'versicolor&1&160': np.array([0.022347126801293967, 0.6718013300441928]),
'versicolor&1&161': np.array([0.10063786451829529, 0.4085974066833644]),
'versicolor&1&162': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&163': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&164': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&165': np.array([-0.32199975656257646, 0.7482293552463756]),
'versicolor&1&166': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&167': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&168': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&169': np.array([0.2619265016777598, 0.33491141590339474]),
'versicolor&1&170': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&171': np.array([0.20183015430619713, 0.7445346002055082]),
'versicolor&1&172': np.array([-0.05987874887638573, 0.6927937290176818]),
'versicolor&1&173': np.array([-0.2562642052727569, 0.6920266972283227]),
'versicolor&1&174': np.array([0.1736438124560164, 0.7898174616442941]),
'versicolor&1&175': np.array([-0.10114089899940126, 0.7326610366533243]),
'versicolor&1&176': np.array([-0.34479806250338163, 0.7789143553916729]),
'versicolor&1&177': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&178': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&179': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&180': np.array([0.8224435822504677, 0.05315271528828394]),
'versicolor&1&181': np.array([0.820222886307464, 0.055413714884152906]),
'versicolor&1&182': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&183': np.array([0.8282924295054531, 0.0752641855714259]),
'versicolor&1&184': np.array([0.8476206690613984, 0.02146454924522743]),
'versicolor&1&185': np.array([0.820222886307464, 0.055413714884152906]),
'versicolor&1&186': np.array([0.69362517791403, 0.2579390890424607]),
'versicolor&1&187': np.array([0.7261791877801502, 0.16248655642013624]),
'versicolor&1&188': np.array([0.8190416077589757, 0.05661509439536992]),
'versicolor&1&189': np.array([0.6654762076749751, 0.2949291633432878]),
'versicolor&1&190': np.array([0.7118161070185614, 0.17683644094125878]),
'versicolor&1&191': np.array([0.8165214253946836, 0.059175619390630096]),
'versicolor&1&192': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&193': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&194': np.array([0.8282924295054531, 0.0752641855714259]),
'versicolor&1&195': np.array([0.5188109114552927, 0.03638964581864269]),
'versicolor&1&196': np.array([0.5131478569192371, 0.04203387599862816]),
'versicolor&1&197': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&198': np.array([0.5965042032375719, 0.48856644624972617]),
'versicolor&1&199': np.array([0.5436097000280874, 0.1461891067488832]),
'versicolor&1&200': np.array([0.5131478569192371, 0.04203387599862816]),
'versicolor&1&201': np.array([0.32513442685780247, 0.6124765483184536]),
'versicolor&1&202': np.array([0.1812883360919208, 0.5504982486874137]),
'versicolor&1&203': np.array([0.4788153032824012, 0.08625929936974323]),
'versicolor&1&204': np.array([0.28490718210609345, 0.6650298146522879]),
'versicolor&1&205': np.array([0.1313204067730033, 0.597079642504441]),
'versicolor&1&206': np.array([0.46583127837967303, 0.09875847161509169]),
'versicolor&1&207': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&208': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&209': np.array([0.5965042032375719, 0.48856644624972617]),
'versicolor&1&210': np.array([0.37157553889555184, 0.1221600832023858]),
'versicolor&1&211': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&212': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&213': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&214': np.array([0.4964962439921071, 0.3798215458387346]),
'versicolor&1&215': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&216': np.array([0.2805345936193346, 0.6595182922149835]),
'versicolor&1&217': np.array([0.08302493125394889, 0.6186280682763334]),
'versicolor&1&218': np.array([0.22125635302655813, 0.2925832702358638]),
'versicolor&1&219': np.array([0.2365788606456636, 0.7120007179768731]),
'versicolor&1&220': np.array([0.022347126801293967, 0.6718013300441928]),
'versicolor&1&221': np.array([0.10063786451829529, 0.4085974066833644]),
'versicolor&1&222': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&223': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&224': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&225': np.array([0.6253337666017573, 0.21983620140147825]),
'versicolor&1&226': np.array([0.6178968870349187, 0.22747652768125623]),
'versicolor&1&227': np.array([0.7245803616608639, 0.18141483095066183]),
'versicolor&1&228': np.array([0.6762617119303499, 0.19305674697949574]),
'versicolor&1&229': np.array([0.7182033715159247, 0.0970420677941148]),
'versicolor&1&230': np.array([0.6178968870349187, 0.22747652768125623]),
'versicolor&1&231': np.array([0.4976586558055923, 0.5393318265947251]),
'versicolor&1&232': np.array([0.4361093214026388, 0.4279491486345008]),
'versicolor&1&233': np.array([0.613985959011319, 0.23148898930908424]),
'versicolor&1&234': np.array([0.46747697713468217, 0.586607956360002]),
'versicolor&1&235': np.array([0.41044950174869577, 0.45415985894965977]),
'versicolor&1&236': np.array([0.6057447478066579, 0.23993389556303918]),
'versicolor&1&237': np.array([0.7245803616608639, 0.18141483095066183]),
'versicolor&1&238': np.array([0.7245803616608639, 0.18141483095066183]),
'versicolor&1&239': np.array([0.6762617119303499, 0.19305674697949574]),
'versicolor&1&240': np.array([0.056623968925773045, 0.43360725859686644]),
'versicolor&1&241': np.array([0.020169511418752378, 0.47015948158260334]),
'versicolor&1&242': np.array([0.5806365328450954, 0.47262706807712623]),
'versicolor&1&243': np.array([0.4146290154471569, 0.4964318942067898]),
'versicolor&1&244': np.array([0.3351719071445682, 0.20616862401308342]),
'versicolor&1&245': np.array([0.020169511418752378, 0.47015948158260334]),
'versicolor&1&246': np.array([0.24022705822940116, 0.7185371033867092]),
'versicolor&1&247': np.array([0.010447231513465048, 0.6616528865917504]),
'versicolor&1&248': np.array([0.024556360933646205, 0.4723948285969902]),
'versicolor&1&249': np.array([0.21321406009810842, 0.7648907754638917]),
'versicolor&1&250': np.array([-0.027450681014480036, 0.6999336015080245]),
'versicolor&1&251': np.array([-0.0164329511444131, 0.5132208276383963]),
'versicolor&1&252': np.array([0.5806365328450954, 0.47262706807712623]),
'versicolor&1&253': np.array([0.5806365328450954, 0.47262706807712623]),
'versicolor&1&254': np.array([0.4146290154471569, 0.4964318942067898]),
'versicolor&1&255': np.array([-0.32199975656257646, 0.7482293552463756]),
'versicolor&1&256': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&257': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&258': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&259': np.array([0.2619265016777598, 0.33491141590339474]),
'versicolor&1&260': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&261': np.array([0.20183015430619713, 0.7445346002055082]),
'versicolor&1&262': np.array([-0.05987874887638573, 0.6927937290176818]),
'versicolor&1&263': np.array([-0.2562642052727569, 0.6920266972283227]),
'versicolor&1&264': np.array([0.1736438124560164, 0.7898174616442941]),
'versicolor&1&265': np.array([-0.10114089899940126, 0.7326610366533243]),
'versicolor&1&266': np.array([-0.34479806250338163, 0.7789143553916729]),
'versicolor&1&267': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&268': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&269': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&270': np.array([0.7749499208750119, 0.8147189440804429]),
'versicolor&1&271': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&272': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&273': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&274': np.array([0.4079256832347186, 0.038455640985860955]),
'versicolor&1&275': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&276': np.array([0.18555813792691386, 0.6940923833143309]),
'versicolor&1&277': np.array([0.32639262064172164, 0.6296083447134281]),
'versicolor&1&278': np.array([0.6964303997553315, 0.7444536452136676]),
'versicolor&1&279': np.array([0.18216358701833335, 0.747615101407194]),
'versicolor&1&280': np.array([0.33549445287370383, 0.6526039763053625]),
'versicolor&1&281': np.array([0.7213651642695392, 0.7718874443854203]),
'versicolor&1&282': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&283': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&284': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&285': np.array([0.7749499208750119, 0.8147189440804429]),
'versicolor&1&286': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&287': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&288': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&289': np.array([0.4079256832347186, 0.038455640985860955]),
'versicolor&1&290': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&291': np.array([0.18555813792691386, 0.6940923833143309]),
'versicolor&1&292': np.array([0.32639262064172164, 0.6296083447134281]),
'versicolor&1&293': np.array([0.6964303997553315, 0.7444536452136676]),
'versicolor&1&294': np.array([0.18216358701833335, 0.747615101407194]),
'versicolor&1&295': np.array([0.33549445287370383, 0.6526039763053625]),
'versicolor&1&296': np.array([0.7213651642695392, 0.7718874443854203]),
'versicolor&1&297': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&298': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&299': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&300': np.array([0.4933316375690332, 0.5272416708629276]),
'versicolor&1&301': np.array([0.5041830043657418, 0.5392782673950876]),
'versicolor&1&302': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&303': np.array([0.13717260713320106, 0.3627779907901665]),
'versicolor&1&304': np.array([0.3093950298647913, 0.1140298206733954]),
'versicolor&1&305': np.array([0.5041830043657418, 0.5392782673950876]),
'versicolor&1&306': np.array([0.1413116283690917, 0.7479856297394165]),
'versicolor&1&307': np.array([0.189773257421942, 0.6552150653012478]),
'versicolor&1&308': np.array([0.40694846236352233, 0.5109051764198169]),
'versicolor&1&309': np.array([0.1390424906594644, 0.7991613016301518]),
'versicolor&1&310': np.array([0.1945777487290197, 0.6743932844312892]),
'versicolor&1&311': np.array([0.415695226122737, 0.5230815102377903]),
'versicolor&1&312': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&313': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&314': np.array([0.13717260713320106, 0.3627779907901665]),
'versicolor&2&0': np.array([0.37157691321004915, 0.12216227283618836]),
'versicolor&2&1': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&2': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&3': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&4': np.array([0.4741571944522723, -0.3872697414416878]),
'versicolor&2&5': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&6': np.array([0.68663266357557, -0.6475988779804592]),
'versicolor&2&7': np.array([0.8701760330833639, -0.5914646440996656]),
'versicolor&2&8': np.array([0.6273836195848199, -0.15720981251964872]),
'versicolor&2&9': np.array([0.7292373173099087, -0.6975400952780954]),
'versicolor&2&10': np.array([0.9270035696082471, -0.640582639672401]),
'versicolor&2&11': np.array([0.6863652799597699, -0.21335694415409426]),
'versicolor&2&12': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&13': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&14': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&15': np.array([0.37157691321004915, 0.12216227283618836]),
'versicolor&2&16': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&17': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&18': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&19': np.array([0.4741571944522723, -0.3872697414416878]),
'versicolor&2&20': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&21': np.array([0.68663266357557, -0.6475988779804592]),
'versicolor&2&22': np.array([0.8701760330833639, -0.5914646440996656]),
'versicolor&2&23': np.array([0.6273836195848199, -0.15720981251964872]),
'versicolor&2&24': np.array([0.7292373173099087, -0.6975400952780954]),
'versicolor&2&25': np.array([0.9270035696082471, -0.640582639672401]),
'versicolor&2&26': np.array([0.6863652799597699, -0.21335694415409426]),
'versicolor&2&27': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&28': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&29': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&30': np.array([0.5188517506916897, 0.036358567813067386]),
'versicolor&2&31': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&32': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&33': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&34': np.array([0.5354807894355184, -0.3418054346754283]),
'versicolor&2&35': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&36': np.array([0.5761361484884252, -0.44637460220261904]),
'versicolor&2&37': np.array([0.7268664040181829, -0.40159406680426807]),
'versicolor&2&38': np.array([0.5917672401610737, -0.061499563231173816]),
'versicolor&2&39': np.array([0.5921993039887428, -0.46498571089163954]),
'versicolor&2&40': np.array([0.7470482158282458, -0.4169281153671854]),
'versicolor&2&41': np.array([0.5967658480721675, -0.06546963852548916]),
'versicolor&2&42': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&43': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&44': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&45': np.array([-0.8252668830593566, 0.11450866713130668]),
'versicolor&2&46': np.array([-0.8211795643076095, 0.11869650771610692]),
'versicolor&2&47': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&48': np.array([-0.7640280271176497, -0.19364537761420375]),
'versicolor&2&49': np.array([-0.8735738195653328, -0.046438180466149094]),
'versicolor&2&50': np.array([-0.8211795643076095, 0.11869650771610692]),
'versicolor&2&51': np.array([-0.8470213454017305, -0.0910504504559782]),
'versicolor&2&52': np.array([-0.8783521565540571, 0.01381094589198601]),
'versicolor&2&53': np.array([-0.8388485924434891, 0.09800790238640067]),
'versicolor&2&54': np.array([-0.8495871633670822, -0.08820642363054954]),
'versicolor&2&55': np.array([-0.8784816772224661, 0.017184907022714958]),
'versicolor&2&56': np.array([-0.835455914569297, 0.10189258327760495]),
'versicolor&2&57': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&58': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&59': np.array([-0.7640280271176497, -0.19364537761420375]),
'versicolor&2&60': np.array([-0.5227340800279543, 0.4209267574088147]),
'versicolor&2&61': np.array([-0.5140708637198534, 0.4305361238057349]),
'versicolor&2&62': np.array([-0.2661726847443776, -0.6902916602462779]),
'versicolor&2&63': np.array([-0.2741128763380603, -0.7260889090887469]),
'versicolor&2&64': np.array([-0.6188410763351541, -0.22803625884668638]),
'versicolor&2&65': np.array([-0.5140708637198534, 0.4305361238057349]),
'versicolor&2&66': np.array([-0.56940429361245, -0.3442345437882425]),
'versicolor&2&67': np.array([-0.6452502612229726, -0.04686872432129788]),
'versicolor&2&68': np.array([-0.596973015481227, 0.37395461795328944]),
'versicolor&2&69': np.array([-0.5760086048531655, -0.3353570725513232]),
'versicolor&2&70': np.array([-0.6488228567611906, -0.03186184826812757]),
    'versicolor&2&71': np.array([-0.5903420131350324, 0.384224764046184]),
"""This module contains a customized Pillow Image class."""
import numpy as np
class OCRImage:
"""OCRImage"""
def __init__(self,
pil_image,
img_id=None,
img_class=None,
matrix=None,
img_hex=None):
"""
:param pil_image: Pillow Image.
https://pillow.readthedocs.io/en/stable/reference/Image.html
:param img_id: Unique id for image object.
:param img_class: Image class.
:param matrix: 2d numpy array with image pixels (0/1).
"""
self.img_id = img_id
self.img_class = img_class
self.img_hex = img_hex
self.matrix = matrix
self.pil_image = pil_image.convert('1')
self.width, self.height = self.pil_image.size
def create_matrix(self):
"""
Create 2d array of binary image
"""
        self.matrix = np.array(self.pil_image)
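# Illustrative usage of OCRImage (the file name and labels below are placeholders, not
# taken from any particular dataset):
#     from PIL import Image
#     ocr_img = OCRImage(Image.open('digit.png'), img_id=1, img_class='7')
#     ocr_img.create_matrix()   # ocr_img.matrix is now a 2-D boolean array of the mode-'1' pixels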
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'ar'
import skimage.io as skio
import numpy as np
import h5py
############################
from run00_common import ImageDirParser
############################
if __name__ == '__main__':
wdir = '../../dataset-image2d/simple4c_test'
imgDirParser = ImageDirParser(wdir=wdir)
print (imgDirParser)
#
pathH5File = 'test-dataset.h5'
f=h5py.File(pathH5File, 'w')
f.create_dataset('scheme', data=np.array(imgDirParser.scheme))
grpData = f.create_group('data')
for ii,dataRow in enumerate(imgDirParser):
print ('[%d] : %s' % (ii, dataRow[0]))
grpName = 'row_%08d' % ii
grp = grpData.create_group(grpName)
for vvi, vv in enumerate(dataRow):
ttype = imgDirParser.scheme[vvi]
tkey = 'col_%02d' % vvi
if ttype == 'path-img2d':
timg = skio.imread(vv)
dset = grp.create_dataset(tkey, data=timg)
elif ttype == 'category-idx':
dset = grp.create_dataset(tkey, data=np.array(vv))
elif ttype == 'array-float':
dset = grp.create_dataset(tkey, data=vv)
elif ttype == 'category-name':
                dset = grp.create_dataset(tkey, data=np.array(vv))
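# The resulting HDF5 layout built above is '/scheme' (the column-type strings) plus one
# group '/data/row_XXXXXXXX' per row, holding one dataset 'col_YY' per column (an image
# array, a category index, a float array, or a category name, depending on the scheme entry).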
from six.moves import range
import six
import os
import re
import numpy as np
import onnxruntime as rt
dir_path = os.path.dirname(os.path.realpath(__file__))
sess = rt.InferenceSession(os.path.join(dir_path, 'model/cls.onnx'))
input_name = sess.get_inputs()[0].name
label_name = sess.get_outputs()[0].name
char_list = [
'x', 'X', 'S', '8', '\n', '.', ':', '-', '*', ')', '?', '(', ',', '/', '#',
'%', '\t', '+', ';', '=', '>', "'", '"', '&', ']', '<'
]
char2id = {item: item_id for item_id, item in enumerate(char_list)}
DEFAULT_EXCLUSIVE = ['M.D.', 'Dr.', 'vs.']
def get_possible_eos(text, exclusive_phrase):
possible_eos_re = [
' [A-Z]', '\.', '\?', '\n', '\t', '\)', '\]', '\}', '\*', '"', ':'
]
eos_re = re.compile('|'.join(possible_eos_re))
eos = set()
for eos_find in eos_re.finditer(text):
start_id = eos_find.span()[0]
exclusive = False
for phrase in exclusive_phrase:
if text[start_id - len(phrase) + 1:start_id + 1] == phrase:
exclusive = True
break
if not exclusive:
eos.update([start_id])
eos = list(eos)
eos.sort()
return eos
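# Illustrative example (the sentence is made up): in "Dr. Smith came. He left." the period
# inside "Dr." matches the '\.' pattern but is discarded by the DEFAULT_EXCLUSIVE check.
# The remaining candidates (the spaces before the capitalised words, the '.' after "came"
# and the final '.') are returned as a sorted list of character indices.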
def get_context_char(text, char_id, window=5):
max_len = len(text)
assert 0 <= char_id < max_len
left_text = []
for i in range(window):
if char_id - i - 1 < 0:
left_text.insert(0, ' ')
else:
left_text.insert(0, text[char_id - i - 1])
right_text = []
for i in range(window):
if char_id + 1 + i >= max_len:
right_text.append(' ')
else:
right_text.append(text[char_id + 1 + i])
return left_text + [text[char_id]] + right_text
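# Worked example (illustrative input): get_context_char("Hi. Bye.", 2) returns the
# 11-element window [' ', ' ', ' ', 'H', 'i', '.', ' ', 'B', 'y', 'e', '.'], i.e. five
# characters of left context padded with spaces, the character itself, then five
# characters of right context.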
def one_hot_encoder(X):
final_output = []
for i in range(11):
targets = np.array(X[:, i]).reshape(-1)
        final_output.append(np.eye(27)[targets])
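# np.eye(27)[targets] is the usual NumPy one-hot idiom: row k of the 27x27 identity
# matrix is the one-hot vector for class k, so indexing it with an integer array of
# class ids yields an (n_samples, 27) one-hot block for this feature column.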
import pytest
import numpy as np
from math import sqrt
from lmdec.array.metrics import subspace_dist, q_value_converge, rmse_k
num_run = 10
p = np.linspace(0, 10, 10)
norm_range = [0, -1, 1, float('inf'), 2, *p]
decimals = 6
def test_v_subspace_reflexive():
"""
V1 = Anything
V2 = V1
S = Anything
subspace_dist(V1, V2, S) == 0:
"""
for _ in range(num_run):
for N in range(1, 1002, 100):
for K in range(1, min(52, N), 5):
for P in norm_range:
V1, _ = np.linalg.qr(np.random.rand(N, K))
V2 = V1.copy()
S = np.random.randn(K) + 10
np.testing.assert_almost_equal(subspace_dist(V1, V2, S, P), 0, decimal=decimals)
def test_v_subspace_shuffle_columns():
"""
V1 = Identity
V2 = ColumnShuffle(Identity)
S = Anything
subspace_dist(V1, V2, S) == 0:
"""
for N in range(2, 10):
I = np.eye(N)
I_shuffle = np.random.permutation(I.T).T
S = np.random.randn(N) + 10
for P in norm_range:
np.testing.assert_almost_equal(subspace_dist(I, I_shuffle, S, P), 0, decimal=decimals)
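# Permuting the columns of an orthonormal basis does not change the subspace it spans,
# so the distance is expected to be 0 for every norm choice and any singular values.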
def test_v_subspace_case2():
"""
Same V with degenerate Singular Values
"""
V1 = np.array([[1, 0],
[0, 1]])
V2 = np.array([[1, 0],
[0, 1]])
V2, _ = np.linalg.qr(V2)
S = np.array([1, 0])
for P in norm_range:
if P == -1:
np.testing.assert_almost_equal(subspace_dist(V1, V2, S, P), 1, decimal=decimals)
else:
np.testing.assert_almost_equal(subspace_dist(V1, V2, S, P), 0, decimal=decimals)
def test_v_subspace_case3():
"""
Same V with non-degenerate Singular Values
"""
V1 = np.array([[1, 0],
[0, 1]])
V2 = np.array([[1, sqrt(2)/2],
[0, sqrt(2)/2]])
V2, _ = np.linalg.qr(V2)
S = np.array([1, 1])
np.testing.assert_almost_equal(subspace_dist(V1, V2, S), 0, decimal=decimals)
def test_v_subspace_case4():
"""
Different V with non-degenerate Singular Values
"""
V1 = np.array([[1, 0],
[0, 1],
[0, 0]])
V2 = np.array([[1, 0],
[0, 0],
[0, 1]])
V2, _ = np.linalg.qr(V2)
for p in np.logspace(0, 10):
for a in np.linspace(0, 1):
S = np.array([1, a])
np.testing.assert_almost_equal(subspace_dist(V1, V2, S, power=p), (a ** p) / (1 ** p + a ** p),
decimal=decimals)
def test_v_subspace_case5():
a = np.random.randn(100, 50)
U, S, V = np.linalg.svd(a, full_matrices=False)
with pytest.raises(ValueError):
subspace_dist(U, a.dot(V), S)
def test_q_vals_reflexive():
for K in range(1, 10):
si = np.random.rand(K)
sj = si.copy()
for norm in norm_range:
for scale in [True, False]:
assert q_value_converge(si, sj, norm=norm, scale=scale) == 0
def test_q_vals_case1():
    for a in np.linspace(0, 1):
from picamera.array import PiRGBArray
from picamera import PiCamera
from time import sleep
import time
import _datetime
import numpy as np
import os
import matplotlib.pyplot as plt
import cv2
from imageio import imread
from scipy.spatial import distance
from keras.models import load_model
import pandas as pd
from tqdm import tqdm
import dlib
from model import create_model
from align import AlignDlib
import glob
import imutils
import math
# INITIALIZE MODELS
nn4_small2 = create_model()
nn4_small2.summary()
nn4_small2.load_weights('weights/nn4.small2.v1.h5')
alignment = AlignDlib('shape_predictor_68_face_landmarks.dat')
#LOAD TRAINING INFORMATION
train_paths = glob.glob("image/*")
print(train_paths)
nb_classes = len(train_paths)
df_train = pd.DataFrame(columns=['image', 'label', 'name'])
for i,train_path in enumerate(train_paths):
name = train_path.split("\\")[-1]
images = glob.glob(train_path + "/*")
for image in images:
df_train.loc[len(df_train)]=[image,i,name]
# print(df_train.head())
# PRE-PROCESSING
def l2_normalize(x, axis=-1, epsilon=1e-10):
output = x / np.sqrt(np.maximum(np.sum(np.square(x), axis=axis, keepdims=True), epsilon))
return output
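# Example: l2_normalize(np.array([[3., 4.]])) returns array([[0.6, 0.8]]), i.e. each row
# is rescaled to unit L2 norm (the epsilon only guards against division by zero).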
def align_face(face):
#print(img.shape)
(h,w,c) = face.shape
bb = dlib.rectangle(0, 0, w, h)
#print(bb)
return alignment.align(96, face, bb,landmarkIndices=AlignDlib.OUTER_EYES_AND_NOSE)
def load_and_align_images(filepaths):
aligned_images = []
for filepath in filepaths:
#print(filepath)
img = cv2.imread(filepath)
aligned = align_face(img)
aligned = (aligned / 255.).astype(np.float32)
aligned = np.expand_dims(aligned, axis=0)
aligned_images.append(aligned)
    return np.array(aligned_images)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 23 08:28:54 2019
@author: victor
"""
import numpy as np
import warnings
warnings.filterwarnings("ignore", category=RuntimeWarning)
def getData(data_set_x):
'''
Clease dataset and split into inputs and outputs
Arguments:
data_set_x -- A numpy array of size (number of examples, 14)
Returns:
data_set_x -- Input data array of size (number of examples, 11)
data_set_y -- Output data array of size (number of examples, 1)
'''
# Set up output numpy array
pre_data_set_y = data_set_x[:,-1]
    data_set_y = np.zeros((pre_data_set_y.shape[0], 1))
# To import required modules:
import numpy as np
import time
import os
import sys
import matplotlib
import matplotlib.cm as cm #for color maps
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec #for specifying plot attributes
from matplotlib import ticker #for setting contour plots to log scale
import scipy.integrate #for numerical integration
import scipy.misc #for factorial function
from scipy.special import erf #error function, used in computing CDF of normal distribution
import scipy.interpolate #for interpolation functions
import corner #corner.py package for corner plots
#matplotlib.rc('text', usetex=True)
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
from src.functions_general import *
from src.functions_compare_kepler import *
from src.functions_load_sims import *
from src.functions_plot_catalogs import *
from src.functions_plot_params import *
savefigures = False
savefigures_directory = '' #'/Users/hematthi/Documents/GradSchool/Research/My_Papers/He_Ford_Ragozzine_Clusters_AMD/Figures/Compare_models/'
save_name = 'Models_Compare'
##### To load the underlying populations:
# Model 1:
loadfiles_directory1 = '/Users/hematthi/Documents/GradSchool/Research/ACI/Simulated_Data/AMD_system/Split_stars/Singles_ecc/Params11_KS/Distribute_AMD_per_mass/durations_norm_circ_singles_multis_GF2020_KS/GP_med/'
run_number1 = ''
N_sim, cos_factor, P_min, P_max, radii_min, radii_max = read_targets_period_radius_bounds(loadfiles_directory1 + 'periods%s.out' % run_number1)
param_vals_all1 = read_sim_params(loadfiles_directory1 + 'periods%s.out' % run_number1)
sssp_per_sys1, sssp1 = compute_summary_stats_from_cat_phys(file_name_path=loadfiles_directory1, run_number=run_number1, load_full_tables=True)
# Model 2:
loadfiles_directory2 = '/Users/hematthi/Documents/GradSchool/Research/ACI/Simulated_Data/AMD_system/Split_stars/Singles_ecc/Params11_KS/Distribute_AMD_per_mass/durations_norm_circ_singles_multis_GF2020_KS/GP_med/Extrapolate_P1000d/f_amd_crit_2/' #'/Users/hematthi/Documents/GradSchool/Research/ACI/Simulated_Data/Split_stars/Clustered_P_R_fswp_bprp/Params13_KS/durations_KS/GP_med/'
run_number2 = ''
param_vals_all2 = read_sim_params(loadfiles_directory2 + 'periods%s.out' % run_number2)
sssp_per_sys2, sssp2 = compute_summary_stats_from_cat_phys(file_name_path=loadfiles_directory2, run_number=run_number2, load_full_tables=True)
# Model 3:
loadfiles_directory3 = '/Users/hematthi/Documents/GradSchool/Research/ACI/Simulated_Data/AMD_system/Split_stars/Singles_ecc/Params11_KS/Distribute_AMD_per_mass/durations_norm_circ_singles_multis_GF2020_KS/GP_med/Extrapolate_P1000d/AMD_in_out/' #'/Users/hematthi/Documents/GradSchool/Research/ACI/Simulated_Data/Split_stars/Clustered_P_R_fswp_bprp/Params13_KS/durations_KS/GP_med/Extrapolate_P1000d/lambdac5/' #'/Users/hematthi/Documents/GradSchool/Research/ACI/Simulated_Data/AMD_system/Split_stars/Singles_ecc/f_amd_crit_all/Params12_KS/durations_norm_circ_singles_multis_GF2020_KS/GP_med/f_amd_crit_2/'
run_number3 = ''
param_vals_all3 = read_sim_params(loadfiles_directory3 + 'periods%s.out' % run_number3)
sssp_per_sys3, sssp3 = compute_summary_stats_from_cat_phys(file_name_path=loadfiles_directory3, run_number=run_number3, load_full_tables=True)
model_sssp = [sssp1, sssp2, sssp3]
model_sssp_per_sys = [sssp_per_sys1, sssp_per_sys2, sssp_per_sys3]
model_names = ['Max AMD model', 'Max AMD model extrapolated', 'Max AMD model in-out'] #[r'$f_{\rm crit} = 0.5$', r'$f_{\rm crit} = 1$', r'$f_{\rm crit} = 2$'] #['Maximum AMD model', 'Two-Rayleigh model'] # Make sure this matches the models loaded!
model_linestyles = ['-', '-', '-'] #['-', '--', '--']
model_colors = ['purple','g','gray'] #['b', 'g', 'r']
##### To plot the simulated catalog as marginal distributions:
subdirectory = ''
fig_size = (8,3) #size of each panel (figure)
fig_lbrt = [0.15, 0.3, 0.95, 0.925]
n_bins = 100
lw = 2 #linewidth
afs = 20 #axes labels font size
tfs = 20 #text labels font size
lfs = 16 #legend labels font size
#'''
# Multiplicities:
#plot_fig_counts_hist_simple(fig_size, [np.concatenate((sssp_per_sys['Mtot_all'], np.zeros(N_sim-len(sssp_per_sys['Mtot_all']), dtype=int))) for sssp_per_sys in model_sssp_per_sys], [], x_min=-1, x_llim=-0.5, x_ulim=10.5, normalize=True, c_sim=model_colors, ls_sim=model_linestyles, lw=lw, labels_sim=model_names, xlabel_text='Planets per system', ylabel_text='Fraction', afs=afs, tfs=tfs, lfs=lfs, legend=True, show_counts_sim=False, fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_underlying_multiplicities.pdf', save_fig=savefigures) # if sssp_per_sys['Mtot_all'] does not contain zeros
plot_fig_counts_hist_simple(fig_size, [sssp_per_sys['Mtot_all'] for sssp_per_sys in model_sssp_per_sys], [], x_min=-1, x_llim=-0.5, x_ulim=10.5, normalize=True, c_sim=model_colors, ls_sim=model_linestyles, lw=lw, labels_sim=model_names, xlabel_text='Planets per system', ylabel_text='Fraction', afs=afs, tfs=tfs, lfs=lfs, legend=True, show_counts_sim=False, fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_underlying_multiplicities.pdf', save_fig=savefigures) # if sssp_per_sys['Mtot_all'] contains zeros
# Clusters per system:
plot_fig_counts_hist_simple(fig_size, [sssp['clustertot_all'] for sssp in model_sssp], [], x_llim=0.5, x_ulim=5.5, normalize=True, lw=lw, c_sim=model_colors, ls_sim=model_linestyles, labels_sim=model_names, xlabel_text=r'Clusters per system $N_c$', ylabel_text='Fraction', afs=afs, tfs=tfs, lfs=lfs, show_counts_sim=False, fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_underlying_clusters_per_system.pdf', save_fig=savefigures)
'''
for n in [1,2,3]:
for i,sssp in enumerate(model_sssp):
x = np.sum(sssp['clustertot_all'] == n)/float(len(sssp['clustertot_all']))
plt.text(n, x*(0.1)**(i+1), '{:0.2f}'.format(x) if x>0.01 else '<0.01', ha='center', color=model_colors[i], fontsize=lfs)
if savefigures:
plt.savefig(savefigures_directory + subdirectory + save_name + '_underlying_clusters_per_system.pdf')
plt.close()
'''
# Planets per cluster:
plot_fig_counts_hist_simple(fig_size, [sssp['pl_per_cluster_all'] for sssp in model_sssp], [], x_llim=0.5, x_ulim=7.5, normalize=True, lw=lw, c_sim=model_colors, ls_sim=model_linestyles, labels_sim=model_names, xlabel_text=r'Planets per cluster $N_p$', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_underlying_planets_per_cluster.pdf', save_fig=savefigures)
# Periods:
#plot_fig_pdf_simple(fig_size, [sssp['P_all'] for sssp in model_sssp], [], x_min=P_min, x_max=P_max, n_bins=n_bins, log_x=True, log_y=True, c_sim=model_colors, lw=lw, ls_sim=model_linestyles, labels_sim=model_names, xticks_custom=[3,10,30,100,300], xlabel_text=r'$P$ (days)', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_underlying_periods.pdf', save_fig=savefigures)
plot_fig_pdf_simple(fig_size, [sssp['P_all'] for sssp in model_sssp], [], x_min=P_min, x_max=1000., n_bins=n_bins, normalize=False, log_x=True, log_y=True, c_sim=model_colors, lw=lw, ls_sim=model_linestyles, labels_sim=model_names, xticks_custom=[3,10,30,100,300], xlabel_text=r'$P$ (days)', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_underlying_periods.pdf', save_fig=savefigures) #####
# Period ratios (all):
plot_fig_pdf_simple(fig_size, [sssp['Rm_all'] for sssp in model_sssp], [], x_min=1., x_max=20., n_bins=n_bins, log_x=True, c_sim=model_colors, lw=lw, ls_sim=model_linestyles, labels_sim=model_names, xticks_custom=[1,2,3,4,5,10,20], xlabel_text=r'$P_{i+1}/P_i$', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_underlying_periodratios.pdf', save_fig=savefigures)
#plt.minorticks_off()
# Period ratios (< 5):
plot_fig_pdf_simple(fig_size, [sssp['Rm_all'][sssp['Rm_all'] < 5] for sssp in model_sssp], [], x_min=1., x_max=5., n_bins=n_bins, log_x=True, c_sim=model_colors, lw=lw, ls_sim=model_linestyles, labels_sim=model_names, xticks_custom=[1,2,3,4,5], xlabel_text=r'$P_{i+1}/P_i$', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_underlying_periodratios_less5.pdf', save_fig=savefigures)
# Eccentricities:
x_min, x_max = 1e-3, 1.
#plot_fig_pdf_simple(fig_size, [sssp['e_all'] for sssp in model_sssp], [], x_min=x_min, x_max=x_max, n_bins=n_bins, log_x=True, c_sim=model_colors, lw=lw, ls_sim=model_linestyles, labels_sim=model_names, xticks_custom=[1e-3,1e-2,1e-1,1.], xlabel_text=r'$e$', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt)
plot_fig_pdf_simple(fig_size, [sssp2['e_all']], [], x_min=x_min, x_max=x_max, n_bins=n_bins, log_x=True, c_sim=[model_colors[1]], lw=lw, ls_sim=[model_linestyles[1]], labels_sim=[''], xticks_custom=[1e-3,1e-2,1e-1,1.], xlabel_text=r'$e$', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt)
bins = np.logspace(np.log10(x_min), np.log10(x_max), n_bins+1)
e1 = sssp_per_sys1['e_all'][sssp_per_sys1['Mtot_all'] == 1, 0]
e2p = sssp_per_sys1['e_all'][sssp_per_sys1['Mtot_all'] > 1]
e2p = e2p[sssp_per_sys1['P_all'][sssp_per_sys1['Mtot_all'] > 1] > 0]
plt.hist(e1, bins=bins, histtype='step', weights=np.ones(len(e1))/len(sssp1['e_all']), color=model_colors[0], ls='--', lw=lw, label='Singles')
plt.hist(e2p, bins=bins, histtype='step', weights=np.ones(len(e2p))/len(sssp1['e_all']), color=model_colors[0], ls=model_linestyles[0], lw=lw, label='Multis')
plt.legend(loc='upper right', bbox_to_anchor=(0.99,0.99), ncol=1, frameon=False, fontsize=lfs)
if savefigures:
plt.savefig(savefigures_directory + subdirectory + save_name + '_underlying_eccentricities.pdf')
plt.close()
# Mutual inclinations:
plot_fig_pdf_simple(fig_size, [sssp['inclmut_all']*(180./np.pi) for sssp in model_sssp], [], x_min=1e-2, x_max=90., n_bins=n_bins, log_x=True, c_sim=model_colors, lw=lw, ls_sim=model_linestyles, labels_sim=model_names, xticks_custom=[1e-2,1e-1,1.,10.,1e2], xlabel_text=r'$i_m$ (deg)', afs=afs, tfs=tfs, lfs=lfs, legend=True, fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_underlying_mutualinclinations.pdf', save_fig=savefigures)
# Planet masses:
plot_fig_pdf_simple(fig_size, [sssp['mass_all'] for sssp in model_sssp], [], x_min=0.09, x_max=1e2, n_bins=n_bins, log_x=True, c_sim=model_colors, lw=lw, ls_sim=model_linestyles, labels_sim=model_names, xlabel_text=r'$M_p$ ($M_\oplus$)', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_underlying_masses.pdf', save_fig=savefigures)
# Planet radii:
plot_fig_pdf_simple(fig_size, [sssp['radii_all'] for sssp in model_sssp], [], x_min=radii_min, x_max=radii_max, n_bins=n_bins, log_x=True, c_sim=model_colors, lw=lw, ls_sim=model_linestyles, labels_sim=model_names, xticks_custom=[0.5,1,2,4,10], xlabel_text=r'$R_p$ ($R_\oplus$)', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_underlying_radii.pdf', save_fig=savefigures)
# Planet radii ratios:
plot_fig_pdf_simple(fig_size, [sssp['radii_ratio_all'] for sssp in model_sssp], [], x_min=0.1, x_max=10., n_bins=n_bins, log_x=True, c_sim=model_colors, lw=lw, ls_sim=model_linestyles, labels_sim=model_names, xlabel_text=r'$R_{p,i+1}/R_{p,i}$', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_underlying_radii_ratios.pdf', save_fig=savefigures)
# Separations in mutual Hill radii:
plot_fig_pdf_simple(fig_size, [sssp['N_mH_all'] for sssp in model_sssp], [], x_min=1., x_max=200., n_bins=n_bins, log_x=True, c_sim=model_colors, lw=lw, ls_sim=model_linestyles, labels_sim=model_names, xlabel_text=r'$\Delta$', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_underlying_deltas.pdf', save_fig=savefigures)
# Stellar radii:
plot_fig_pdf_simple(fig_size, [sssp['Rstar_all'] for sssp in model_sssp], [], x_min=0.5, x_max=2.5, n_bins=n_bins, c_sim=model_colors, lw=lw, ls_sim=model_linestyles, labels_sim=model_names, xlabel_text=r'$R_\star (R_\odot)$', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_underlying_stellar_radii.pdf', save_fig=savefigures)
### GF2020 metrics, but for the underlying systems:
# Dynamical masses CDFs:
plot_fig_pdf_simple(fig_size, [sssp_per_sys['dynamical_mass'] for sssp_per_sys in model_sssp_per_sys], [], x_min=2e-7, x_max=1e-3, n_bins=n_bins, log_x=True, c_sim=model_colors, lw=lw, ls_sim=model_linestyles, labels_sim=model_names, xlabel_text=r'$\mu$', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_underlying_dynamical_masses.pdf', save_fig=savefigures)
# Planet radii partitioning CDFs:
plot_fig_pdf_simple(fig_size, [sssp_per_sys['radii_partitioning'] for sssp_per_sys in model_sssp_per_sys], [], x_min=1e-5, x_max=1., n_bins=n_bins, log_x=True, c_sim=model_colors, lw=lw, ls_sim=model_linestyles, labels_sim=model_names, xlabel_text=r'$\mathcal{Q}_R$', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_underlying_radii_partitioning.pdf', save_fig=savefigures)
# Planet radii monotonicity CDFs:
plot_fig_pdf_simple(fig_size, [sssp_per_sys['radii_monotonicity'] for sssp_per_sys in model_sssp_per_sys], [], x_min=-0.6, x_max=0.6, n_bins=n_bins, c_sim=model_colors, lw=lw, ls_sim=model_linestyles, labels_sim=model_names, xlabel_text=r'$\mathcal{M}_R$', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_underlying_radii_monotonicity.pdf', save_fig=savefigures)
# Gap complexity CDFs:
plot_fig_pdf_simple(fig_size, [sssp_per_sys['gap_complexity'] for sssp_per_sys in model_sssp_per_sys], [], x_min=0., x_max=1., n_bins=n_bins, log_x=False, c_sim=model_colors, lw=lw, ls_sim=model_linestyles, labels_sim=model_names, xlabel_text=r'$\mathcal{C}$', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_underlying_gap_complexity.pdf', save_fig=savefigures)
plt.show()
plt.close()
#'''
##### To plot the intrinsic multiplicity, clusters per system, and planets per cluster distributions for multiple models in the same figure:
'''
fig = plt.figure(figsize=(8,8))
plot = GridSpec(3,1,left=0.15,bottom=0.1,right=0.95,top=0.98,wspace=0,hspace=0.4)
ax = plt.subplot(plot[0,0]) # intrinsic multiplicities
plot_panel_counts_hist_simple(ax, [np.concatenate((sssp_per_sys['Mtot_all'], np.zeros(N_sim-len(sssp_per_sys['Mtot_all']), dtype=int))) for sssp_per_sys in model_sssp_per_sys], [], x_min=0, x_ulim=10.5, normalize=True, c_sim=model_colors, ls_sim=model_linestyles, lw=lw, labels_sim=model_names, xlabel_text='Intrinsic planet multiplicity', ylabel_text='Fraction', afs=afs, tfs=tfs, lfs=lfs, legend=True, show_counts_sim=False)
ax = plt.subplot(plot[1,0]) # clusters per system
Nc_max = np.max([np.max(sssp['clustertot_all']) for sssp in model_sssp])
plot_panel_pdf_simple(ax, [sssp['clustertot_all'] for sssp in model_sssp], [], x_min=0.5, x_max=Nc_max+0.5, n_bins=Nc_max, c_sim=model_colors, lw=lw, ls_sim=model_linestyles, labels_sim=model_names, xlabel_text=r'Clusters per system $N_c$', afs=afs, tfs=tfs, lfs=lfs)
plt.xlim([0.5,6.5])
#for n in [1,2,3]:
# for i,sssp in enumerate(model_sssp):
# x = np.sum(sssp['clustertot_all'] == n)/float(len(sssp['clustertot_all']))
# plt.text(n, x*(0.1)**(i+1), '{:0.2f}'.format(x) if x>0.01 else '<0.01', ha='center', color=model_colors[i], fontsize=lfs)
ax = plt.subplot(plot[2,0]) # planets per cluster
Np_max = np.max([np.max(sssp['pl_per_cluster_all']) for sssp in model_sssp])
plot_panel_pdf_simple(ax, [sssp['pl_per_cluster_all'] for sssp in model_sssp], [], x_min=0.5, x_max=Np_max+0.5, n_bins=Np_max, c_sim=model_colors, lw=lw, ls_sim=model_linestyles, labels_sim=model_names, xlabel_text=r'Planets per cluster $N_p$', afs=afs, tfs=tfs, lfs=lfs)
plt.xlim([0.5,8.5])
if savefigures:
plt.savefig(savefigures_directory + subdirectory + save_name + '_underlying_planet_cluster_multiplicities.pdf')
plt.close()
'''
'''
# Model 1:
loadfiles_directory1 = 'ACI/Simulated_Data/Julia_v0.7/Kepler_catalog_optimization/q1q17_dr25_gaia_fgk_stars80006/Clustered_P_R/f_high_incl_low_incl_mmr/Fit_rate_mult_P_Pratios_D_Dratios_dur_durratios_mmr/Some11_params_KSweightedrms/lc_lp_0p5_5_alphaP_-2_1_alphaR1_R2_-6_0_ecc_0_0p1_incl_inclmmr_0_90_sigmaR_0_0p5_sigmaP_0_0p3/Fixed_Rbreak3_Ncrit8/targs400030_maxincl0_maxiters5000/sigma_i_greater_sigma_i_mmr/best_N/' #'ExoplanetsSysSim_Clusters/clusters_v0.7/'
n1_1plus = np.zeros(10)
ptot1 = np.zeros(10)
for i in range(10):
run_number1 = str(i)
N_sim, cos_factor, P_min, P_max, radii_min, radii_max = read_targets_period_radius_bounds(loadfiles_directory1 + 'periods%s.out' % run_number1)
param_vals_all1 = read_sim_params(loadfiles_directory1 + 'periods%s.out' % run_number1)
sssp_per_sys1, sssp1 = compute_summary_stats_from_cat_phys(file_name_path=loadfiles_directory1, run_number=run_number1)
n1_1plus[i] = len(sssp_per_sys1['Mtot_all'])
ptot1[i] = np.sum(sssp_per_sys1['Mtot_all'])
n1_none = N_sim - n1_1plus
f1_none = n1_none/N_sim
fp1 = ptot1/float(N_sim)
print 'Clustered_P_R'
print 'Number of stars with no planets: ', n1_none
print 'Fraction of stars with no planets: ', f1_none
print 'Total number of planets: ', ptot1
print 'Ratio of planets to stars: ', fp1
# Model 2:
loadfiles_directory2 = 'ACI/Simulated_Data/Julia_v0.7/Kepler_catalog_optimization/q1q17_dr25_gaia_fgk_stars80006/Clustered_P/f_high_incl_low_incl_mmr/Fit_rate_mult_P_Pratios_D_Dratios_dur_durratios_mmr/Some10_params_KSweightedrms/Fixed_Rbreak3_Ncrit8/lc_lp_0p5_5_alphaP_-2_1_alphaR1_R2_-6_0_ecc_0_0p1_incl_inclmmr_0_90_sigmaP_0_0p3/targs400030_maxincl0_maxiters5000/sigma_i_greater_sigma_i_mmr/best_N/'
n2_1plus = np.zeros(10)
ptot2 = np.zeros(10)
for i in range(10):
run_number2 = str(i)
param_vals_all2 = read_sim_params(loadfiles_directory2 + 'periods%s.out' % run_number2)
sssp_per_sys2, sssp2 = compute_summary_stats_from_cat_phys(file_name_path=loadfiles_directory2, run_number=run_number2)
n2_1plus[i] = len(sssp_per_sys2['Mtot_all'])
ptot2[i] = np.sum(sssp_per_sys2['Mtot_all'])
n2_none = N_sim - n2_1plus
f2_none = n2_none/N_sim
fp2 = ptot2/float(N_sim)
print 'Clustered_P'
print 'Number of stars with no planets: ', n2_none
print 'Fraction of stars with no planets: ', f2_none
print 'Total number of planets: ', ptot2
print 'Ratio of planets to stars: ', fp2
# Model 3:
loadfiles_directory3 = 'ACI/Simulated_Data/Julia_v0.7/Kepler_catalog_optimization/q1q17_dr25_gaia_fgk_stars80006/Non_Clustered/f_high_incl_low_incl_mmr/Fit_rate_mult_P_Pratios_D_Dratios_dur_durratios_mmr/Some8_params_KSweightedrms/Fixed_Rbreak3_Ncrit8/lc_1_10_alphaP_-2_1_alphaR1_R2_-6_0_ecc_0_0p1_incl_inclmmr_0_90/targs400030_maxincl0_maxiters5000/sigma_i_greater_sigma_i_mmr/best_N/'
n3_1plus = np.zeros(10)
ptot3 = np.zeros(10)
for i in range(10):
run_number3 = str(i)
param_vals_all3 = read_sim_params(loadfiles_directory3 + 'periods%s.out' % run_number3)
sssp_per_sys3, sssp3 = compute_summary_stats_from_cat_phys(file_name_path=loadfiles_directory3, run_number=run_number3)
n3_1plus[i] = len(sssp_per_sys3['Mtot_all'])
ptot3[i] = np.sum(sssp_per_sys3['Mtot_all'])
n3_none = N_sim - n3_1plus
f3_none = n3_none/N_sim
fp3 = ptot3/float(N_sim)
print 'Non_clustered:'
print 'Number of stars with no planets: ', n3_none
print 'Fraction of stars with no planets: ', f3_none
print 'Total number of planets: ', ptot3
print 'Ratio of planets to stars: ', fp3
'''
'''
##### To plot the inner vs. outer period ratios of triplets (in 3+ systems) (similar to Fig 6 in Zhu et al. 2019 and Fig 7 in Weiss et al. 2018a, BUT for the intrinsic systems):
# Just for Clustered_P_R model:
#p_per_sys_subset = sssp_per_sys1['P_all'][np.random.choice(np.arange(len(sssp_per_sys1['P_all'])), int(round(len(sssp_per_sys1['P_all'])/(N_sim/N_Kep))), replace=False)]
p_per_sys_subset = sssp_per_sys1['P_all'][np.random.choice(np.arange(len(sssp_per_sys1['P_all'])), 1000, replace=False)]
compute_pratio_in_out_and_plot_fig([p_per_sys_subset], colors=[model_colors[0]], labels=[model_names[0]], xymax=25., xyticks_custom=[1,2,3,4,5,10,20], afs=afs, tfs=tfs, lfs=lfs, save_name='Meeting_plots/July_9_2019/Clustered_P_R_intrinsic_pratio_in_out.pdf', save_fig=False)
# For all three models:
p_per_sys_subset1 = sssp_per_sys1['P_all'][np.random.choice(np.arange(len(sssp_per_sys1['P_all'])), 1000, replace=False)]
p_per_sys_subset2 = sssp_per_sys2['P_all'][np.random.choice(np.arange(len(sssp_per_sys2['P_all'])), 1000, replace=False)]
p_per_sys_subset3 = sssp_per_sys3['P_all'][np.random.choice(np.arange(len(sssp_per_sys3['P_all'])), 1000, replace=False)]
compute_pratio_in_out_and_plot_fig([p_per_sys_subset1, p_per_sys_subset2, p_per_sys_subset3], colors=model_colors, labels=model_names, xymax=25., xyticks_custom=[1,2,3,4,5,10,20], afs=afs, tfs=tfs, lfs=lfs, save_name='Meeting_plots/July_9_2019/Models_Compare_intrinsic_pratio_in_out.pdf', save_fig=False)
compute_pratio_in_out_and_plot_fig_pdf([sssp_per_sys1['P_all'], sssp_per_sys2['P_all'], sssp_per_sys3['P_all']], n_bins=100, x_min=0.1, x_max=10., colors=model_colors, labels=model_names, afs=afs, tfs=tfs, lfs=lfs, save_name='Meeting_plots/July_9_2019/Models_Compare_intrinsic_pratio_out_in_ratio.pdf', save_fig=False)
plt.show()
plt.close()
'''
##### To compute the fraction of all planets near an MMR:
#f_mmr = calc_f_near_pratios(sssp_per_sys1)
##### To plot galleries of a sample of intrinsic multi-planet systems:
#plot_figs_multis_underlying_gallery(sssp_per_sys1, sssp1, n_pl=8, fig_size=(16,8), panels_per_fig=4, N_sys_sample=400, N_sys_per_plot=50, plot_line_per=1, colorby='clusterid', tfs=20, save_name_base=savefigures_directory + subdirectory + save_name + '_underlying_multis', save_fig=False)
#plot_figs_multis_underlying_gallery(sssp_per_sys1, sssp1, n_pl=3, fig_size=(8,16), panels_per_fig=1, N_sys_sample=100, N_sys_per_plot=100, plot_line_per=10, colorby='clusterid', tfs=20, save_name_base=savefigures_directory + subdirectory + save_name + '_underlying_multis', save_fig=False)
##### To plot eccentricity vs mutual inclinations, with attached histograms:
persys_1d_1, perpl_1d_1 = convert_underlying_properties_per_planet_1d(sssp_per_sys1, sssp1)
persys_1d_2, perpl_1d_2 = convert_underlying_properties_per_planet_1d(sssp_per_sys2, sssp2)
ecc_min_max, incl_min_max = [3e-4, 1.], [1e-2, 180.]
bins_log_ecc = np.linspace(np.log10(ecc_min_max[0]), np.log10(ecc_min_max[1]), 101)
bins_log_incl = np.linspace(np.log10(incl_min_max[0]), np.log10(incl_min_max[1]), 101)
fig = plt.figure(figsize=(16,8))
plot = GridSpec(5, 9, left=0.1, bottom=0.1, right=0.975, top=0.975, wspace=0, hspace=0)
ax = plt.subplot(plot[1:,:4]) # scatter i_m vs ecc (model 1)
corner.hist2d(np.log10(perpl_1d_1['e_all']), np.log10(perpl_1d_1['im_all']), bins=50, plot_density=False, contour_kwargs={'colors': ['0.6','0.4','0.2','0']}, data_kwargs={'color': 'k'})
ax.tick_params(axis='both', labelsize=afs)
xtick_vals = np.array([-3., -2., -1., 0.])
ytick_vals = np.array([-2., -1., 0., 1., 2.])
plt.xticks(xtick_vals, 10.**xtick_vals)
plt.yticks(ytick_vals, 10.**ytick_vals)
plt.xlim(np.log10(np.array(ecc_min_max)))
from envs import *
from utils import *
from config import *
from torch.multiprocessing import Pipe
from nes_py.wrappers import JoypadSpace
from tensorboardX import SummaryWriter
import numpy as np
import copy
import os
import pickle
import argparse
parser = argparse.ArgumentParser("Evaluate")
parser.add_argument('--shared_features', action='store_true', help="")
args = parser.parse_args()
if args.shared_features:
from agents import *
else:
from agents_sep import *
def main():
name = 'submission'
print(name)
try:
os.makedirs('models/' + name)
except OSError:
pass
print({section: dict(config[section]) for section in config.sections()})
train_method = default_config['TrainMethod']
env_id = default_config['EnvID']
env_type = default_config['EnvType']
if env_type == 'mario':
env = JoypadSpace(gym_super_mario_bros.make(env_id), COMPLEX_MOVEMENT)
elif env_type == 'atari':
env = gym.make(env_id)
else:
raise NotImplementedError
input_size = env.observation_space.shape # 4
output_size = env.action_space.n # 2
if 'Breakout' in env_id:
output_size -= 1
env.close()
is_load_model = False
# Render
is_render = False
model_path = 'models/{}.model'.format(env_id)
icm_path = 'models/{}.icm'.format(env_id)
writer = SummaryWriter('runs/' + name)
use_cuda = default_config.getboolean('UseGPU')
use_gae = default_config.getboolean('UseGAE')
use_noisy_net = default_config.getboolean('UseNoisyNet')
lam = float(default_config['Lambda'])
num_worker = int(default_config['NumEnv'])
num_step = int(default_config['NumStep'])
ppo_eps = float(default_config['PPOEps'])
epoch = int(default_config['Epoch'])
mini_batch = int(default_config['MiniBatch'])
batch_size = int(num_step * num_worker / mini_batch)
learning_rate = float(default_config['LearningRate'])
entropy_coef = float(default_config['Entropy'])
gamma = float(default_config['Gamma'])
eta = float(default_config['ETA'])
stack_size = int(default_config['StateStackSize'])
clip_grad_norm = float(default_config['ClipGradNorm'])
reward_rms = RunningMeanStd()
obs_rms = RunningMeanStd(shape=(1, stack_size, 84, 84))
pre_obs_norm_step = int(default_config['ObsNormStep'])
discounted_reward = RewardForwardFilter(gamma)
agent = ICMAgent
if default_config['EnvType'] == 'atari':
env_type = AtariEnvironment
elif default_config['EnvType'] == 'mario':
env_type = MarioEnvironment
else:
raise NotImplementedError
agent = agent(
input_size,
output_size,
num_worker,
num_step,
gamma,
lam=lam,
learning_rate=learning_rate,
ent_coef=entropy_coef,
clip_grad_norm=clip_grad_norm,
epoch=epoch,
batch_size=batch_size,
ppo_eps=ppo_eps,
eta=eta,
use_cuda=use_cuda,
use_gae=use_gae,
use_noisy_net=use_noisy_net,
stack_size=stack_size
)
if is_load_model:
if use_cuda:
agent.model.load_state_dict(torch.load(model_path))
agent.icm.load_state_dict(torch.load(icm_path))
agent.mdrnn.load_state_dict(torch.load(mdrnn_path))
else:
agent.model.load_state_dict(torch.load(model_path, map_location='cpu'))
works = []
parent_conns = []
child_conns = []
for idx in range(num_worker):
parent_conn, child_conn = Pipe()
work = env_type(env_id, is_render, idx, child_conn, history_size=stack_size)
work.start()
works.append(work)
parent_conns.append(parent_conn)
child_conns.append(child_conn)
states = np.zeros([num_worker, stack_size, 84, 84])
prev_states = np.zeros([num_worker, stack_size, 84, 84])
prev_actions = np.random.randint(0, output_size, size=(num_worker,))
sample_episode = 0
sample_rall = 0
sample_step = 0
sample_env_idx = 0
sample_i_rall = 0
global_update = 0
global_step = 0
# normalize obs
    print('Starting to initialize the observation normalization parameters...')
next_obs = []
steps = 0
while steps < pre_obs_norm_step:
steps += num_worker
actions = np.random.randint(0, output_size, size=(num_worker,))
for parent_conn, action in zip(parent_conns, actions):
parent_conn.send(action)
for parent_conn in parent_conns:
s, r, d, rd, lr, max_x = parent_conn.recv()
next_obs.append(s[:])
next_obs = np.stack(next_obs)
obs_rms.update(next_obs)
    print('Finished initializing observation normalization.')
rewards_list = []
intrinsic_reward_list = []
max_x_pos_list = []
samples_ep_list = []
global_update_list = []
while True:
total_state, total_reward, total_done, total_next_state, total_action, total_prev_state, total_prev_action, \
total_int_reward, total_next_obs, total_values, total_policy, total_log_reward = \
[], [], [], [], [], [], [], [], [], [], [], []
global_step += (num_worker * num_step)
global_update += 1
# Step 1. n-step rollout
for _ in range(num_step):
actions, value, policy = agent.get_action((states - obs_rms.mean) / np.sqrt(obs_rms.var),
(prev_states - obs_rms.mean) / np.sqrt(obs_rms.var),
prev_actions)
for parent_conn, action in zip(parent_conns, actions):
parent_conn.send(action)
next_states, rewards, dones, real_dones, log_rewards, next_obs, max_x_pos = [], [], [], [], [], [], []
for parent_conn in parent_conns:
s, r, d, rd, lr, max_x = parent_conn.recv()
next_states.append(s)
rewards.append(r)
dones.append(d)
real_dones.append(rd)
log_rewards.append(lr)
max_x_pos.append(max_x)
next_states = np.stack(next_states)
rewards = np.hstack(rewards)
log_rewards = np.hstack(log_rewards)
dones = np.hstack(dones)
            real_dones = np.hstack(real_dones)
from __future__ import print_function
import math
import random
from collections import defaultdict
import numpy as np
import torch
import torch.optim as optim
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
from PIL import Image
import cv2
def denormalize(input): # (3,128,128)
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
for i in range(3):
input[i,:,:] *= std[i]
input[i,:,:] += mean[i]
return input
def image_from_array(img):
#torch to img
img = denormalize(img.cpu().data.numpy())
img = np.transpose(img, (1,2,0))
img = np.uint8(255 * img)
img = Image.fromarray(img, 'RGB')
return img
def visualize_imgs(img1, img2, img12, epoch, comb , cam = None):
'''
img1 (torch tensor) [3, 128, 128]
img2 (torch tensor)
img12 (torch tensor)
cam (mask)
'''
img1 = image_from_array(img1)
img2 = image_from_array(img2)
img12 = image_from_array(img12)
fig = plt.figure(figsize=(80, 20), dpi=150)
name = f'epoch_{epoch}_{comb}'
fig.suptitle(name, fontsize=80)
plt.rcParams.update({'font.size': 50})
ax = fig.add_subplot(1, 4, 1)
plt.imshow(img1)
ax.set_title('Image 1')
ax = fig.add_subplot(1, 4, 2)
plt.imshow(img2)
ax.set_title('Image 2')
ax = fig.add_subplot(1, 4, 3)
plt.imshow(img12)
ax.set_title('Image 1+2')
if cam is not None:
ax = fig.add_subplot(1, 4, 4)
heatmap = cv2.applyColorMap(np.uint8(255 * cam), cv2.COLORMAP_JET)
heatmap = np.float32(heatmap) / 255
heatmap = np.uint8(255 * heatmap)
plt.imshow(heatmap)
ax.set_title('CAM')
plt.savefig(f"./DefectMix/{name}.png")
plt.close(fig)
plt.close()
fig.clf()
def mixup_data(x, y, alpha=1.0, inter_class=True):
'''Returns mixed inputs, pairs of targets, and lambda'''
lam = np.random.beta(alpha, alpha)
batch_size = x.size()[0]
if not inter_class: # only intra_class
# 1. gathering w.r.t. label
class_idx = defaultdict(list)
for idx, label in enumerate(y):
class_idx[label.item()].append(idx)
# 2. shuffle between each label
mixed_x = x
for cls_ in class_idx.keys():
origin_idx = class_idx[cls_]
permuted_idx = random.sample(origin_idx, len(origin_idx))
mixed_x[origin_idx, :] = lam * mixed_x[origin_idx, :] + (1 - lam) * mixed_x[permuted_idx, :]
y_a = y_b = y
else:
index = torch.randperm(batch_size).cuda()
mixed_x = lam * x + (1 - lam) * x[index, :]
y_a, y_b = y, y[index]
return mixed_x, y_a, y_b, lam
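# Typical use with a cross-entropy criterion (a sketch; model, criterion, inputs and
# targets are illustrative names, not defined in this module):
#     mixed_x, y_a, y_b, lam = mixup_data(inputs, targets, alpha=1.0)
#     outputs = model(mixed_x)
#     loss = lam * criterion(outputs, y_a) + (1 - lam) * criterion(outputs, y_b)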
def bbox2(img):
assert img.ndim == 2
rows = np.any(img, axis=1)
cols = np.any(img, axis=0)
rmin, rmax = np.where(rows)[0][[0, -1]]
cmin, cmax = np.where(cols)[0][[0, -1]]
return rmin, rmax, cmin, cmax
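# Example: for a 2-D mask whose non-zero pixels lie only in rows 2..4 and columns 1..3,
# bbox2 returns (2, 4, 1, 3), the tight bounding box of the non-zero region.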
def rand_bbox(size, lam, bbs = None):
W = size[2] # 순서 잘못된 듯?
H = size[3]
if bbs is not None:
bx1, bx2, by1, by2 = bbs # cam
# step 1, get cx and cy except cam
it = 0
while True:
it+=1
if it >= 100:
print(it)
return 0,0,0,0
cx = np.random.randint(W)
cy = np.random.randint(H)
if not (bx1 <= cx <= bx2 and by1 <= cy <= by2):
break
min_dist = W**2 + H**2
min_i = 0
min_j = 0
for i in range(bx1, bx2+1):
for j in range(by1, by2+1):
dist = (cx-i)**2 + (cy-j)**2
if dist <= min_dist:
min_dist = dist
min_i = i
min_j = j
cut_rat = np.sqrt(1. - lam)
cut_w = np.int(W * cut_rat) //2
max_cut = max(abs(min_i - cx), abs(min_j - cy))
cut = min(cut_w, max_cut)
bbx1 = np.clip(cx - cut, 0, W)
bby1 = np.clip(cy - cut, 0, H)
bbx2 = np.clip(cx + cut, 0, W)
bby2 = np.clip(cy + cut, 0, H)
mask = np.zeros((W,H))
mask[bx1:bx2, by1:by2] +=1
mask[bbx1:bbx2, bby1:bby2] +=1
if 2 in mask:
print(bbx1,bbx2,bby1,bby2)
print(bx1,bx2,by1,by2)
print(min_i, min_j, cx,cy, cut_w)
assert 2 not in mask
else:
cut_rat = np.sqrt(1. - lam)
cut_w = np.int(W * cut_rat)
cut_h = np.int(H * cut_rat)
cx = np.random.randint(W)
        cy = np.random.randint(H)
from collections import Counter
import numpy as np
from douzero.env.game import GameEnv
Card2Column = {3: 0, 4: 1, 5: 2, 6: 3, 7: 4, 8: 5, 9: 6, 10: 7,
               11: 8, 12: 9, 13: 10, 14: 11, 17: 12}  # maps card rank to column index
NumOnes2Array = {0: np.array([0, 0, 0, 0]),  # numpy encoding for the number of copies held of a card
1: np.array([1, 0, 0, 0]),
                 2: np.array([1, 1, 0, 0]),
# General Headers####################
import numpy as np
import copy
#####################################
# sklearn headers##################################
import xgboost as xgb
from sklearn.metrics import roc_auc_score, accuracy_score
from sklearn.model_selection import KFold
from sklearn.neighbors import NearestNeighbors
from math import erfc
import random
#####################################################
def CI_sampler_conditional_kNN(X_in, Y_in, Z_in, train_len=-1, k=1):
'''Generate Test and Train set for converting CI testing into Binary Classification
Arguments:
X_in: Samples of r.v. X (np.array)
Y_in: Samples of r.v. Y (np.array)
Z_in: Samples of r.v. Z (np.array)
train_len: length of training set, must be less than number of samples
k: k-nearest neighbor to be used: Always set k = 1.
Xtrain: Features for training the classifier
Ytrain: Train Labels
Xtest: Features for test set
Ytest: Test Labels
CI_data: Developer Use only
'''
if Z_in is None:
assert (type(X_in) == np.ndarray), "Not an array"
assert (type(Y_in) == np.ndarray), "Not an array"
nx, dx = X_in.shape
ny, dy = Y_in.shape
assert (nx == ny), "Dimension Mismatch"
if train_len == -1:
train_len = int(2 * len(X_in) / 3)
X_tr = X_in[0:train_len, :]
Y_tr = Y_in[0:train_len, :]
X_te = X_in[train_len::, :]
Y_te = Y_in[train_len::, :]
Xtrain, Ytrain = create_Itest_data(X_tr, Y_tr)
Xtest, Ytest = create_Itest_data(X_te, Y_te)
return Xtrain, Ytrain, Xtest, Ytest, None
assert (type(X_in) == np.ndarray), "Not an array"
assert (type(Y_in) == np.ndarray), "Not an array"
assert (type(Z_in) == np.ndarray), "Not an array"
nx, dx = X_in.shape
ny, dy = Y_in.shape
nz, dz = Z_in.shape
assert (nx == ny), "Dimension Mismatch"
assert (nz == ny), "Dimension Mismatch"
assert (nx == nz), "Dimension Mismatch"
samples = np.hstack([X_in, Y_in, Z_in])
Xset = range(0, dx)
Yset = range(dx, dx + dy)
Zset = range(dx + dy, dx + dy + dz)
if train_len == -1:
train_len = int(2 * len(X_in) / 3)
assert (train_len < nx), "Training length cannot be larger than total length"
train = samples[0:train_len, :]
train_2 = copy.deepcopy(train)
X = train_2[:, Xset]
Y = train_2[:, Yset]
Z = train_2[:, Zset]
Yprime = copy.deepcopy(Y)
nbrs = NearestNeighbors(n_neighbors=k + 1, algorithm='ball_tree', metric='l2').fit(Z)
distances, indices = nbrs.kneighbors(Z)
for i in range(len(train_2)):
index = indices[i, k]
Yprime[i, :] = Y[index, :]
train1 = train_2
train2 = np.hstack([X, Yprime, Z])
y1 = np.ones([len(train1), 1])
y2 = np.zeros([len(train2), 1])
all_train1 = np.hstack([train1, y1])
all_train2 = np.hstack([train2, y2])
all_train = np.vstack([all_train1, all_train2])
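    # The nearest-neighbour bootstrap above defines the classification problem: label 1
    # marks original (X, Y, Z) triples and label 0 marks triples whose Y was replaced by
    # the Y of its k-th nearest neighbour in Z, an approximate draw from P(Y|Z) that is
    # independent of X. A classifier that beats chance on this task therefore provides
    # evidence against conditional independence of X and Y given Z.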
shuffle = np.random.permutation(len(all_train))
train = all_train[shuffle, :]
l, m = train.shape
Xtrain = train[:, 0:m - 1]
Ytrain = train[:, m - 1]
test = samples[train_len::, :]
test_2 = copy.deepcopy(test)
X = test_2[:, Xset]
Y = test_2[:, Yset]
Z = test_2[:, Zset]
Yprime = copy.deepcopy(Y)
nbrs = NearestNeighbors(n_neighbors=k + 1, algorithm='ball_tree', metric='l2').fit(Z)
distances, indices = nbrs.kneighbors(Z)
for i in range(len(test_2)):
index = indices[i, k]
Yprime[i, :] = Y[index, :]
test1 = test_2
test2 = np.hstack([X, Yprime, Z])
y1 = np.ones([len(test1), 1])
y2 = np.zeros([len(test2), 1])
    all_test1 = np.hstack([test1, y1])
import numpy as np
with open('tsp.txt','r') as f:
lines = f.readlines()
NC = int(lines[0])
City = list(map(lambda x: tuple(map(float,x.split())), lines[1:]))
def eucliean_distance(x,y):
return np.sqrt((x[0]-y[0])**2+(x[1]-y[1])**2)
#initialize
City_code = [0b1 << i for i in range(NC)]
A_new = {}
A_new_set = set([0b1])
A_new[0b1] = np.zeros(NC)
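# Held-Karp dynamic programme (a sketch of the invariant maintained below): A[S][j] holds
# the length of the shortest path that starts at city 0, visits exactly the cities in the
# bitmask S, and ends at city j. The base case is the singleton subset {0} with cost 0;
# each round m then extends every subset of size m-1 that contains city 0 by one more city.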
# main loop
for m in range(2,NC+1):
print('Subproblem size: ', m)
A_old_set = A_new_set.copy()
A_old = A_new.copy()
#print(A_old.keys())
#making new subsets containing m elements:
A_new_set_list = list(filter(lambda x: x & 0b1, A_old_set))
A_new_set_temp = list(map(lambda x: set(map(lambda y: x | y, City_code)), A_new_set_list))
A_new_set = set.union(*A_new_set_temp)
A_new_set = A_new_set - A_old_set
print(' total number of subsets: ',len(A_new_set))
# initialize A_new
A_new = {}
for S in A_new_set:
        A_new[S] = np.full(NC, np.inf)
import numpy as np
import fixed_env as env
import load_trace
import matplotlib.pyplot as plt
import itertools
S_INFO = 5 # bit_rate, buffer_size, rebuffering_time, bandwidth_measurement, chunk_til_video_end
S_LEN = 8 # take how many frames in the past
A_DIM = 6
MPC_FUTURE_CHUNK_COUNT = 5
ACTOR_LR_RATE = 0.0001
CRITIC_LR_RATE = 0.001
VIDEO_BIT_RATE = [300,750,1200,1850,2850,4300] # Kbps
BITRATE_REWARD = [1, 2, 3, 12, 15, 20]
BUFFER_NORM_FACTOR = 10.0
CHUNK_TIL_VIDEO_END_CAP = 48.0
TOTAL_VIDEO_CHUNKS = 48
M_IN_K = 1000.0
REBUF_PENALTY = 4.3  # 1 sec of rebuffering costs the equivalent of 4.3 Mbps of quality
SMOOTH_PENALTY = 1
DEFAULT_QUALITY = 1 # default video quality without agent
RANDOM_SEED = 42
RAND_RANGE = 1000000
SUMMARY_DIR = './results'
LOG_FILE = './results/log_sim_mpc'
# log in format of time_stamp bit_rate buffer_size rebuffer_time chunk_size download_time reward
# NN_MODEL = './models/nn_model_ep_5900.ckpt'
CHUNK_COMBO_OPTIONS = []
# past errors in bandwidth
past_errors = []
past_bandwidth_ests = []
#size_video1 = [3155849, 2641256, 2410258, 2956927, 2593984, 2387850, 2554662, 2964172, 2541127, 2553367, 2641109, 2876576, 2493400, 2872793, 2304791, 2855882, 2887892, 2474922, 2828949, 2510656, 2544304, 2640123, 2737436, 2559198, 2628069, 2626736, 2809466, 2334075, 2775360, 2910246, 2486226, 2721821, 2481034, 3049381, 2589002, 2551718, 2396078, 2869088, 2589488, 2596763, 2462482, 2755802, 2673179, 2846248, 2644274, 2760316, 2310848, 2647013, 1653424]
size_video1 = [2354772, 2123065, 2177073, 2160877, 2233056, 1941625, 2157535, 2290172, 2055469, 2169201, 2173522, 2102452, 2209463, 2275376, 2005399, 2152483, 2289689, 2059512, 2220726, 2156729, 2039773, 2176469, 2221506, 2044075, 2186790, 2105231, 2395588, 1972048, 2134614, 2164140, 2113193, 2147852, 2191074, 2286761, 2307787, 2143948, 1919781, 2147467, 2133870, 2146120, 2108491, 2184571, 2121928, 2219102, 2124950, 2246506, 1961140, 2155012, 1433658]
size_video2 = [1728879, 1431809, 1300868, 1520281, 1472558, 1224260, 1388403, 1638769, 1348011, 1429765, 1354548, 1519951, 1422919, 1578343, 1231445, 1471065, 1491626, 1358801, 1537156, 1336050, 1415116, 1468126, 1505760, 1323990, 1383735, 1480464, 1547572, 1141971, 1498470, 1561263, 1341201, 1497683, 1358081, 1587293, 1492672, 1439896, 1139291, 1499009, 1427478, 1402287, 1339500, 1527299, 1343002, 1587250, 1464921, 1483527, 1231456, 1364537, 889412]
size_video3 = [1034108, 957685, 877771, 933276, 996749, 801058, 905515, 1060487, 852833, 913888, 939819, 917428, 946851, 1036454, 821631, 923170, 966699, 885714, 987708, 923755, 891604, 955231, 968026, 874175, 897976, 905935, 1076599, 758197, 972798, 975811, 873429, 954453, 885062, 1035329, 1026056, 943942, 728962, 938587, 908665, 930577, 858450, 1025005, 886255, 973972, 958994, 982064, 830730, 846370, 598850]
size_video4 = [668286, 611087, 571051, 617681, 652874, 520315, 561791, 709534, 584846, 560821, 607410, 594078, 624282, 687371, 526950, 587876, 617242, 581493, 639204, 586839, 601738, 616206, 656471, 536667, 587236, 590335, 696376, 487160, 622896, 641447, 570392, 620283, 584349, 670129, 690253, 598727, 487812, 575591, 605884, 587506, 566904, 641452, 599477, 634861, 630203, 638661, 538612, 550906, 391450]
size_video5 = [450283, 398865, 350812, 382355, 411561, 318564, 352642, 437162, 374758, 362795, 353220, 405134, 386351, 434409, 337059, 366214, 360831, 372963, 405596, 350713, 386472, 399894, 401853, 343800, 359903, 379700, 425781, 277716, 400396, 400508, 358218, 400322, 369834, 412837, 401088, 365161, 321064, 361565, 378327, 390680, 345516, 384505, 372093, 438281, 398987, 393804, 331053, 314107, 255954]
size_video6 = [181801, 155580, 139857, 155432, 163442, 126289, 153295, 173849, 150710, 139105, 141840, 156148, 160746, 179801, 140051, 138313, 143509, 150616, 165384, 140881, 157671, 157812, 163927, 137654, 146754, 153938, 181901, 111155, 153605, 149029, 157421, 157488, 143881, 163444, 179328, 159914, 131610, 124011, 144254, 149991, 147968, 161857, 145210, 172312, 167025, 160064, 137507, 118421, 112270]
def get_chunk_size(quality, index):
if ( index < 0 or index > 48 ):
return 0
    # note that the quality and video labels are inverted (i.e., quality 5 is the highest and pertains to size_video1)
sizes = {5: size_video1[index], 4: size_video2[index], 3: size_video3[index], 2: size_video4[index], 1: size_video5[index], 0:size_video6[index]}
return sizes[quality]
def main():
np.random.seed(RANDOM_SEED)
assert len(VIDEO_BIT_RATE) == A_DIM
all_cooked_time, all_cooked_bw, all_file_names = load_trace.load_trace()
net_env = env.Environment(all_cooked_time=all_cooked_time,
all_cooked_bw=all_cooked_bw)
log_path = LOG_FILE + '_' + all_file_names[net_env.trace_idx]
log_file = open(log_path, 'w')
time_stamp = 0
last_bit_rate = DEFAULT_QUALITY
bit_rate = DEFAULT_QUALITY
action_vec = np.zeros(A_DIM)
action_vec[bit_rate] = 1
s_batch = [np.zeros((S_INFO, S_LEN))]
a_batch = [action_vec]
r_batch = []
entropy_record = []
video_count = 0
# make chunk combination options
for combo in itertools.product([0,1,2,3,4,5], repeat=5):
CHUNK_COMBO_OPTIONS.append(combo)
while True: # serve video forever
# the action is from the last decision
# this is to make the framework similar to the real
delay, sleep_time, buffer_size, rebuf, \
video_chunk_size, next_video_chunk_sizes, \
end_of_video, video_chunk_remain = \
net_env.get_video_chunk(bit_rate)
time_stamp += delay # in ms
time_stamp += sleep_time # in ms
# reward is video quality - rebuffer penalty
reward = VIDEO_BIT_RATE[bit_rate] / M_IN_K \
- REBUF_PENALTY * rebuf \
- SMOOTH_PENALTY * np.abs(VIDEO_BIT_RATE[bit_rate] -
VIDEO_BIT_RATE[last_bit_rate]) / M_IN_K
# log scale reward
# log_bit_rate = np.log(VIDEO_BIT_RATE[bit_rate] / float(VIDEO_BIT_RATE[0]))
# log_last_bit_rate = np.log(VIDEO_BIT_RATE[last_bit_rate] / float(VIDEO_BIT_RATE[0]))
# reward = log_bit_rate \
# - REBUF_PENALTY * rebuf \
# - SMOOTH_PENALTY * np.abs(log_bit_rate - log_last_bit_rate)
# reward = BITRATE_REWARD[bit_rate] \
# - 8 * rebuf - np.abs(BITRATE_REWARD[bit_rate] - BITRATE_REWARD[last_bit_rate])
r_batch.append(reward)
last_bit_rate = bit_rate
# log time_stamp, bit_rate, buffer_size, reward
log_file.write(str(time_stamp / M_IN_K) + '\t' +
str(VIDEO_BIT_RATE[bit_rate]) + '\t' +
str(buffer_size) + '\t' +
str(rebuf) + '\t' +
str(video_chunk_size) + '\t' +
str(delay) + '\t' +
str(reward) + '\n')
log_file.flush()
# retrieve previous state
if len(s_batch) == 0:
state = [np.zeros((S_INFO, S_LEN))]
else:
state = np.array(s_batch[-1], copy=True)
# dequeue history record
state = np.roll(state, -1, axis=1)
# this should be S_INFO number of terms
        state[0, -1] = VIDEO_BIT_RATE[bit_rate] / float(np.max(VIDEO_BIT_RATE))
# -*- coding: utf-8 -*-
"""
Created on Thu May 30 20:03:50 2019
Finds Vg1 and Vg2 values above a threshold, determined by the ratio of the areas
of a Gaussian fit of the intensity histogram to the total area of the intensities
@author: <NAME>
"""
import numpy as np
import scipy.signal as ss
import scipy.optimize as opt
from scipy.signal import medfilt2d, savgol_filter
from scipy.ndimage import correlate
from sklearn.neighbors import KDTree
import stability as stab
def hist_data(z):
"""
Finds x and y data from histogram
:param z: input
:return: x and y
"""
data = np.histogram(z, bins='scott')
x = data[1]
x = np.array([(x[i] + x[i + 1]) / 2 for i in range(0, len(x) - 1)])
return x, np.array(data[0])
def gauss(x, *params):
return abs(params[2]) * np.exp(-(x - params[0]) ** 2 / (2 * params[1] ** 2))
def multi_gaussian(x, *params):
"""
Fits multiple Gaussian distributions, number of which determined by the number of parameters inputted
"""
y = np.zeros_like(x)
index = np.arange(0, len(params), 3)
if index.size > 1:
for i in range(0, len(params) // 3):
mu = params[i]
sig = params[i + len(params) // 3]
amp = params[i + 2 * len(params) // 3]
y = y + abs(amp) * np.exp(-(x - mu) ** 2 / (2 * sig ** 2))
else:
y = y + abs(params[2]) * np.exp(-(x - params[0]) ** 2 / (2 * params[1] ** 2))
return y
def multi_gauss_background(x, *params):
y = np.zeros_like(x)
index = np.arange(0, len(params) - 2, 3)
if index.size > 1:
y = y + params[0] * x + params[1]
for i in range(0, (len(params) - 2) // 3):
mu = params[i + 2]
sig = params[i + 2 + (len(params) - 2) // 3]
amp = params[i + 2 + 2 * (len(params) - 2) // 3]
y = y + abs(amp) * np.exp(-(x - mu) ** 2 / (2 * sig ** 2))
else:
y = y + params[0] * x + params[1] + abs(params[4]) * np.exp(-(x - params[2]) ** 2 / (2 * params[3] ** 2))
return y
def greedy_guess(guess, x, y):
n = (len(guess) - 2) // 3
m, sig, a = guess[2:n + 2], guess[n + 2:2 * n + 2], guess[2 * n + 2:]
chi = (y - multi_gauss_background(x, *guess)) / multi_gauss_background(x, *guess)
chi = savgol_filter(chi, 3, 2)
m, a = np.append(m, float(x[np.where(chi == np.max(chi))])), np.append(a, float(y[np.where(chi == np.max(chi))]))
sig = np.append(sig, sig[n - 1] / 2)
return np.append(guess[:2], np.append(m, np.append(sig, a)))
def gradient(x, y, z):
"""
Calculates gradient along x and y of intensities to reduce noise
    @param x: x values
@param y: y values
@param z: intensities
@return:
"""
m_z = np.reshape(z, (len(np.unique(y)), len(np.unique(x))))# Transform array into matrix
sg = savgol_filter(m_z, 5, 2) + savgol_filter(m_z, 5, 2, axis=0) # Savgol filter acts as a low pass band filter
signal = sg - np.mean(sg) + np.mean(m_z)
return np.reshape(signal, np.shape(x))
def gradient_exp(x, y, z):
"""
Calculates gradient along x and y of intensities to reduce noise
    @param x: x values
@param y: y values
@param z: intensities
@return:
"""
m_z = np.reshape(z, (len(np.unique(y)), len(np.unique(x))))# Transform array into matrix
diff = [[0, -1, 0], [-1, 5, -1], [0, -1, 0]]
z_diff = correlate(m_z, diff)
sg = savgol_filter(z_diff, 5, 2) + savgol_filter(z_diff, 5, 2, axis=0) # Savgol filter acts as a low pass band filter
signal = sg - np.mean(sg) + np.mean(m_z)
return np.reshape(signal, np.shape(x))
def filtering(x, y, z):
m_z = np.reshape(z, (len(np.unique(y)), len(np.unique(x)))) # Transform array into matrix
s = medfilt2d(m_z)
return np.reshape(s, (int(len(x)),))
def normalise(z):
"""
Unity-based normalisation function, such that all values range between 0 and 1
:param z: Raw data that needs normalising
:return: Normalised data
"""
return np.nan_to_num((z - np.min(z)) / (np.max(z) - np.min(z)))
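# Example: normalise(np.array([2., 4., 6.])) returns array([0. , 0.5, 1. ]).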
def fit_gauss(z):
intensity = normalise(z)
x, y = hist_data(intensity)
guess = np.append(0, np.append(np.median(y), np.append(np.median(x[np.where(y == np.max(y))]),
np.append(np.std(x[np.where(y > np.median(y))]),
np.max(y)))))
fit_param, cov = opt.curve_fit(multi_gauss_background, x, y, guess)
if fit_param[2] > 0.5:
index = np.where(intensity<fit_param[2]-3*abs(fit_param[3]))
else:
index = np.where(intensity>fit_param[2]+3*abs(fit_param[3]))
return index
def curved_plane(x, y, param):
return param[0]*x + param[1]*x**2 + param[2]*y + param[3]*y**2 + param[4]*x*y + param[5]
def linear_plane(x, y, param):
return param[0]*x + param[1]*y + param[2]
def minimise_plane(param, x, y, z):
return np.sum((z - linear_plane(x, y, param))**2)
def linear(x, z):
return (np.median(z[np.where(x==np.min(x))])-np.median(z[np.where(x==np.max(x))]))/(np.min(x)-np.max(x))
def remove_background(x, y, z):
p = gradient_exp(x, y, z)
param = np.array((linear(x, z), linear(y,z), np.median(p)))
sol = opt.minimize(minimise_plane, param, args=(x, y, p))
p_n = normalise(p - linear_plane(x, y, sol.x))
return p_n*(np.max(z)-np.min(z)) + np.min(z)
def grad_exp(z, val_x, val_y):
val = z.reshape(val_y, val_x)
scharr = np.array([[ -3-3j, 0-10j, +3 -3j],
[-10+0j, 0+ 0j, +10 +0j],
[ -3+3j, 0+10j, +3 +3j]]) # Gx + j*Gy
grad = ss.convolve2d(val, scharr, boundary='symm', mode='same')
index = np.where(np.logical_or(abs(np.angle(grad).flatten())<=0.15, abs(np.angle(grad).flatten())>=np.pi-0.15))
z[index] = 0
return z
def get_klpq_div(p_probs, q_probs):
    # Calculates the Kullback-Leibler divergence between distributions P and Q
kl_div = 0.0
for pi, qi in zip(p_probs, q_probs):
kl_div += pi*np.nan_to_num(np.log(pi/qi))
return kl_div
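# The loop above implements D_KL(P || Q) = sum_i p_i * log(p_i / q_i); an equivalent
# vectorised form (assuming p_probs and q_probs are numpy arrays) would be:
#   kl_div = np.sum(p_probs * np.nan_to_num(np.log(p_probs / q_probs)))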
def D_KL(threshold, x, y):
# Finds best fit Gaussian distribution and calculates the corresponding Kullback-Leibler divergence
index = np.where(np.logical_and(x>=threshold[0], x<=threshold[1]))
xs, ys = x[index], y[index]
if np.trapz(ys)>0:
ys = ys/np.trapz(ys)
else:
return np.inf
guess = np.append(np.median(xs[np.where(ys == np.max(ys))]),
np.append(np.std(xs[np.where(ys > np.median(ys))]),
np.max(ys)))
bounds = ((np.min(x)-np.std(x), np.std(x)/10**4, np.mean(ys)), (np.max(x)+np.std(x), np.max(x)-np.min(x), 10*np.max(ys)))
fit_param, cov = opt.curve_fit(gauss, xs, ys, guess, bounds=bounds)
    return get_klpq_div(ys + 10**-7, gauss(xs, *fit_param) + 10**-7)  # Add a small epsilon so that we don't divide by zero or take log(0)
def minimise_DKL(x, y):
# Estimate first guess and boundaries to use:
guess = np.append(np.median(x[np.where(y == np.max(y))]),
np.append(np.std(x[np.where(y > np.median(y))]),
np.max(y)))
b = ((np.min(x)-np.std(x), np.std(x)/10**4, np.mean(y)), (np.max(x)+np.std(x), np.max(x)-np.min(x), np.max(y)*10))
fit_param, cov = opt.curve_fit(gauss, x, y, guess, bounds=b)
x0 = [fit_param[0]-2*fit_param[1], fit_param[0]+2*fit_param[1]]
bound = ((np.min(x), fit_param[0]-fit_param[1]), (fit_param[0]+fit_param[1], np.max(x)))
# Find optimal bound solutions
sol = opt.minimize(D_KL, x0, jac=None, method='L-BFGS-B', options={'eps':1/len(x)}, args=(x, y), bounds=bound)
return sol.x
def threshold_DKL(z):
intensity = normalise(z)
x, y = hist_data(intensity)
    y = y**0.5  # Broadens the peaks so that finer structure in the intensity can be identified
threshold = minimise_DKL(x, y)
if abs(np.max(z))>abs(np.min(z)):
index = np.where(intensity>=threshold[1])
else:
index = np.where(intensity<=threshold[0])
return index
def threshold(z, val):
if abs(np.max(z))>abs(np.min(z)):
v = abs(np.min(z))*0.9
else:
v = -abs(np.max(z))*0.9
val = np.append(val, v)
v = np.mean(abs(val))
m = np.where(np.logical_or(z > v, z < -v))
return m, val
def intense(z, index):
x, y = hist_data(z)
guess = np.append(np.median(x[np.where(y == np.max(y))]),
np.append(np.std(x[np.where(y > np.median(y))]),
np.max(y)))
fit_param, cov = opt.curve_fit(gauss, x, y, guess)
return z[index]-fit_param[0]
def threshold_experimental(vg1, vg2, i, q):
i_g, q_g = remove_background(vg1, vg2, i), remove_background(vg1, vg2, q)
m_i, m_q = threshold_DKL(i_g), threshold_DKL(q_g)
index = np.unique(np.append(m_i, m_q))
intensity = normalise(abs(intense(i, index)))+normalise(abs(intense(q, index)))
return vg1[index], vg2[index], intensity, i_g, q_g, index
def threshold_theoretical(vg1, vg2, i):
i_g = gradient(vg1, vg2, i)
x, y = hist_data(i_g)
x = normalise(x)
fit_param = [np.median(x[np.where(y == np.max(y))]), np.std(x[np.where(y > np.median(y))]), np.max(y)]
try:
fit_one, _ = opt.curve_fit(multi_gaussian, x, y, fit_param)
ind = np.where(x > fit_one[0] + fit_one[1])
ys = y[ind] - multi_gaussian(x[ind], *fit_one)
guess = [fit_one[0], np.median(x[ind][np.where(ys == np.max(ys))]),
fit_one[1], np.std(x[np.where(y > np.median(ys))]),
fit_one[2], np.max(ys)]
try:
fit_param, cov = opt.curve_fit(multi_gaussian, x, y, guess)
error = np.sqrt(np.diag(cov))
if error[1] * 10 > error[0]:
index = np.where(normalise(i) > fit_param[1])
else:
index = np.where(normalise(i) > 0.4)
except:
val = np.min(x[np.where(x > fit_one[0] + fit_one[1])])
index = np.where(normalise(i) > val)
except:
index = np.where(normalise(i) > 0.4)
return vg1[index], vg2[index], i[index], x, y, fit_param
def averaging_xy(x, y, intensity, leaf, n_neighbours):
"""
Uses KDTree to find n_neighbours and then calculates a weighted mean, resulting in thinning the data
:param x: threshold x values
:param y: threshold y values
:param intensity: corresponding intensities
:param leaf: determines how many neighbouring points to check, leaf > n_neighbours
:param n_neighbours: number of neighbours to average through
:return: thinned x and y values
"""
data = np.transpose(np.vstack([x, y]))
xs, ys, zs = [], [], []
tree = KDTree(data, leaf_size=leaf) # Finds relation between points
for i in range(0, len(data)):# // n_neighbours):
# Figure out which are the neighbouring points
# dist, ind = tree.query(np.reshape(data[i * n_neighbours, :], (1, -1)), k=n_neighbours)
dist, ind = tree.query(np.reshape(data[i, :], (1, -1)), k=n_neighbours)
# takes weighted average of x and y values of given point
x_m, y_m = np.average(x[ind], weights=intensity[ind]), np.average(y[ind], weights=intensity[ind])
z_m = np.average(intensity[ind])
xs, ys, zs = np.append(xs, x_m), np.append(ys, y_m), np.append(zs, z_m)
return xs, ys, zs
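# A usage sketch with hypothetical threshold data (the leaf and n_neighbours values
# are illustrative; leaf only affects KDTree build/query speed, not the result):
#   xs, ys, zs = averaging_xy(x_thr, y_thr, intensity_thr, leaf=100, n_neighbours=10)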
def thinning(Vg1, Vg2, i_g, q_g, ind):
val_x, val_y = len(np.unique(Vg1)), len(np.unique(Vg2))
# Set data points below threshold to zero
M = np.sqrt(i_g**2+q_g**2)
mask = np.ones(M.shape,dtype=bool)
mask[ind] = False
M[mask] = 0
M = grad_exp(M, val_x, val_y)
# Find peaks along x
if val_x > 100:
peaks, hight = ss.find_peaks(M, width=1, distance=val_x//100)
else:
peaks, hight = ss.find_peaks(M, width=1)
xs, ys, zs = Vg1[peaks], Vg2[peaks], M[peaks]
# Find peaks along y
xt = np.reshape(np.transpose(np.reshape(Vg1, (val_y, val_x))), np.shape(Vg1))
yt = np.reshape(np.transpose(np.reshape(Vg2, (val_y, val_x))), np.shape(Vg2))
Mt = np.reshape(np.transpose(np.reshape(M, (val_y, val_x))), np.shape(M))
if val_y > 100:
peaks, hight = ss.find_peaks(Mt, width=1, distance=val_y//100)
else:
peaks, hight = ss.find_peaks(Mt, width=1)
# add peaks from both directions
xs, ys, zs = np.append(xs, xt[peaks]), np.append(ys, yt[peaks]), np.append(zs, Mt[peaks])
# xs, ys, zs = averaging_xy(xs, ys, zs, 100, 10)
return xs, ys, zs
def thinning_IQ(vg1, vg2, z, val_x):
x, y = hist_data(z)
    y = y**0.5  # Broadens the peaks so that finer structure in the intensity can be identified
threshold = minimise_DKL(x, y)
if abs(np.max(z))>abs(np.min(z)):
t = threshold[1]
else:
z = - z
t = abs(threshold[0])
peaks, hight = ss.find_peaks(z, width=3, distance=val_x//50, height=t)
xs, ys, zs = vg1[peaks], vg2[peaks], z[peaks]
return xs, ys, zs
def normalise_hough(data):
"""
Normalised input data and sets theta range
:param data: input data of threshold data points
:return: theta, x and y
"""
x = (data[:, 0] - np.min(data[:, 0])) / (np.max(data[:, 0]) - np.min(data[:, 0]))
y = (data[:, 1] - np.min(data[:, 1])) / (np.max(data[:, 1]) - np.min(data[:, 1]))
f = (np.max(data[:, 1]) - np.min(data[:, 1])) / (np.max(data[:, 0]) - np.min(data[:, 0]))
return x, y, f
def threshold_hough(theta, accumulator):
"""
Takes the accumulator matrix and thresholds the data values
:param theta: array of theta values used
:param accumulator: accumulator of rho
:return: threshold_theta, threshold_d
"""
t = np.tile(theta, len(accumulator))
h, angle, d = np.histogram2d(t, accumulator, bins=len(theta)) # Creating histogram of intensities
index_t, index_d = np.where(h > np.max(h) / 3)
threshold_theta, threshold_d = angle[index_t], d[index_d] # Threshold values of angle and d
return threshold_theta, threshold_d
def hough_transform(samples, theta):
"""
Uses Hough transform to determine lines in stability diagram
:param samples: data points above threshold
:param theta: range of theta values taken into account
:return: grad and y-spacing
"""
x, y = np.reshape(samples[:, 0], (len(samples), 1)), np.reshape(samples[:, 1], (len(samples), 1))
    accumulator = np.matmul(x, np.cos(theta)) + np.matmul(y, np.sin(theta))
import numpy as np
import networkx as nx
import pextant_cpp
from .SEXTANTsolver import sextantSearch, SEXTANTSolver, sextantSearchList
from .astar import aStarSearchNode, aStarNodeCollection, aStarCostFunction, aStarSearch
from pextant.EnvironmentalModel import EnvironmentalModel, GridMeshModel
from pextant.lib.geoshapely import GeoPoint, GeoPolygon, LONG_LAT
from pextant.solvers.nxastar import GG, astar_path
from time import time
class MeshSearchElement(aStarSearchNode):
def __init__(self, mesh_element, parent=None, cost_from_parent=0):
self.mesh_element = mesh_element
self.derived = {} #the point of this is to store in memory expensive calculations we might need later
super(MeshSearchElement, self).__init__(mesh_element.mesh_coordinate, parent, cost_from_parent)
def goalTest(self, goal):
return self.mesh_element.mesh_coordinate == goal.mesh_element.mesh_coordinate
#return self.mesh_element.distanceToElt(goal.mesh_element) < self.mesh_element.parentMesh.resolution*3
def getChildren(self):
return MeshSearchCollection(self.mesh_element.getNeighbours(), self)
def __getattr__(self, item):
try:
return MeshSearchElement.__getattribute__(self, item)
except AttributeError:
return getattr(self.mesh_element, item)
def __str__(self):
return str(self.mesh_element)
class MeshSearchCollection(aStarNodeCollection):
def __init__(self, collection, parent=None):
super(MeshSearchCollection, self).__init__(collection)
self.derived = None
self.parent = parent
def __getitem__(self, index):
mesh_search_element = MeshSearchElement(self.collection.__getitem__(index), self.parent)
mesh_search_element.derived = dict(list(zip(['pathlength','time','energy'],self.derived[:,index])))
return mesh_search_element
class ExplorerCost(aStarCostFunction):
def __init__(self, astronaut, environment, optimize_on, cached=False, heuristic_accelerate=1):
"""
:type astronaut: Astronaut
:param environment:
:type environment: GridMeshModel
:param optimize_on:
"""
super(ExplorerCost, self).__init__()
self.explorer = astronaut
self.map = environment
self.optimize_vector = astronaut.optimizevector(optimize_on)
self.heuristic_accelerate = heuristic_accelerate
self.cache = cached
if cached:
self.cache_costs()
def cache_all(self):
end_y, end_x = self.end_node.y, self.end_node.x
self.cache_costs()
self.cache_heuristic((end_x, end_y))
def cache_costs(self):
self.cached["costs"] = self.create_costs_cache()
def create_costs_cache(self):
kernel = self.map.searchKernel
offsets = kernel.getKernel()
dem = self.map
# planar (i.e. x-y) distances to all neighbors (by kernel-index)
dr = np.apply_along_axis(np.linalg.norm, 1, offsets) * self.map.resolution
# elevations
z = self.map.dataset_unmasked
# stored gravity value
g = self.map.getGravity()
# initialize arrays for holding costs
neighbour_size = len(self.map.searchKernel.getKernel())
slopes_rad = np.empty((dem.shape[0], dem.shape[1], neighbour_size))
energy_cost = np.empty((dem.shape[0], dem.shape[1], neighbour_size))
time_cost = np.empty((dem.shape[0], dem.shape[1], neighbour_size))
path_cost = np.empty((dem.shape[0], dem.shape[1], neighbour_size))
for idx, offset in enumerate(offsets):
# planar distance to neighbor at {offset}
dri = dr[idx]
# angle (in radians) between each node and neighbor at {offset}
slopes_rad[:, :, idx] = np.arctan2(np.roll(np.roll(z, -offset[0], axis=0), -offset[1], axis=1) - z, dri)
# calculate {energy cost} and {planar velocity} from slope, distance, and gravity
energy_cost[:, :, idx], v = self.explorer.energy_expenditure(dri, slopes_rad[:, :, idx], g)
# time = distance / rate
time_cost[:,:,idx] = dri/v
# total, 3-dimensional distance traveled
path_cost[:,:,idx] = dri/np.cos(slopes_rad[:, :, idx])*np.ones_like(z)
return {'time': time_cost, 'path': path_cost, 'energy': energy_cost}
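    # Shape note (illustration): each entry of the returned dict has shape
    # (rows, cols, len(kernel)), so the cached energy cost of stepping from cell
    # (row, col) to its k-th kernel neighbour is looked up as
    #   self.cached["costs"]["energy"][row, col][k]
    # which is exactly the access pattern used in getCostBetween() below.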
def cache_heuristic(self, goal):
self.cached["heuristics"] = self.create_heuristic_cache(goal)
def create_heuristic_cache(self, goal):
# get planar distance to goal from each grid location
oct_grid_distance = self.map.get_oct_grid_distance_to_point(goal)
# Adding the energy weight
explorer = self.explorer
m = explorer.mass
planet = self.map.planet
energy_weight = explorer.minenergy[planet](m) # to minimize energy cost
max_velocity = explorer.maxvelocity # to minimize time cost
optimize_weights = self.optimize_vector
optimize_values = np.array([
1, # Distance per m
max_velocity, # time per m
energy_weight # energy per m
])
optimize_cost = oct_grid_distance * np.dot(optimize_values, optimize_weights)
heuristic_cost = self.heuristic_accelerate * optimize_cost
return heuristic_cost
def get_cache_heuristic(self, start_row, start_col):
return self.cached["heuristics"][start_row, start_col]
def getHeuristicCost(self, elt):
node = elt.mesh_element
start_row, start_col = node.mesh_coordinate
heuristic_fx = self.get_cache_heuristic if self.cache else self._getHeuristicCost
return heuristic_fx(start_row, start_col)
def getHeuristicCostRaw(self, rowcol):
start_row, start_col = rowcol
heuristic_fx = self.get_cache_heuristic if self.cache else self._getHeuristicCost
return heuristic_fx(start_row, start_col)
def _getHeuristicCost(self, start_row, start_col):
r = self.map.resolution
start_x, start_y = r*start_col, r*start_row
end_x, end_y = self.end_node.x, self.end_node.y
optimize_vector = self.optimize_vector
# max number of diagonal steps that can be taken
h_diagonal = min(abs(start_y - end_y), abs(start_x - end_x))
h_straight = abs(start_y - end_y) + abs(start_x - end_x) # Manhattan distance
h_oct_grid = np.sqrt(2) * h_diagonal + (h_straight - 2 * h_diagonal)
# Adding the energy weight
m = self.explorer.mass
min_energy_function = self.explorer.minenergy[self.map.planet]
min_energy = min_energy_function(m) # min to keep heuristic admissible
max_velocity = self.explorer.maxvelocity # max v => min time, also to keep heuristic admissible
# determine value to multiply 'optimal distance' value by to get best admissible heuristic
admissible_values = np.array([1, max_velocity, min_energy])
admissible_weight = np.dot(admissible_values, optimize_vector)
# Patel 2010. See page 49 of Aaron's thesis
heuristic_weight = self.heuristic_accelerate
heuristic_cost = heuristic_weight * admissible_weight * h_oct_grid
return heuristic_cost
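    # Worked example of the octile-grid distance used above (illustrative numbers,
    # unit resolution): for a displacement of dx = 3, dy = 5 cells,
    # h_diagonal = 3 and h_straight = 8, so
    # h_oct_grid = sqrt(2)*3 + (8 - 2*3) = 3*sqrt(2) + 2 ~ 6.24,
    # i.e. three diagonal steps plus two straight steps.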
def getCostBetween(self, fromnode, tonodes):
""":type fromnode: MeshSearchElement"""
from_elt = fromnode.mesh_element
to_cllt = tonodes.collection
if self.cache:
row, col = from_elt.mesh_coordinate
selection = self.map.cached_neighbours[row,col]
costs = self.cached["costs"]
optimize_vector = np.array([
costs['path'][row, col][selection],
costs['time'][row, col][selection],
costs['energy'][row, col][selection]
])
else:
optimize_vector = self.calculateCostBetween(from_elt, to_cllt)
optimize_weights = self.optimize_vector
costs = np.dot(optimize_vector.transpose(), optimize_weights)
tonodes.derived = optimize_vector
return list(zip(tonodes, to_cllt.get_states(), costs))
def getCostToNeighbours(self, from_node):
row, col = from_node.state
neighbours = self.map.cached_neighbours(from_node.state)
return self.cached[row, col, neighbours]
def calculateCostBetween(self, from_elt, to_elts):
"""
Given the start and end states, returns the cost of travelling between them.
Allows for states which are not adjacent to each other.
optimize_vector is a list or tuple of length 3, representing the weights of
Distance, Time, and Energy
Performance optimization: tonodes instead of tonode, potentially numpy optimized, only need to load info
from fromnode once
"""
explorer = self.explorer
slopes, path_lengths = from_elt.slopeTo(to_elts)
times = explorer.time(path_lengths, slopes)
g = self.map.getGravity()
energy_cost, _ = explorer.energy_expenditure(path_lengths, slopes, g)
#TODO: rewrite this so not all functions need to get evaluated(expensive)
optimize_vector = np.array([
path_lengths,
times,
energy_cost
])
return optimize_vector
class astarSolver(SEXTANTSolver):
# algorithm type 'enum' rather than bool (previously: inhouse=true/false)
PY_INHOUSE = 1
PY_NETWORKX = 2
CPP_NETWORKX = 3
def __init__(self, env_model, explorer_model, viz=None, optimize_on='Energy',
cached=False, algorithm_type=PY_INHOUSE, heuristic_accelerate=1):
self.explorer_model = explorer_model
self.optimize_on = optimize_on
self.cache = env_model.cached
self.algorithm_type = algorithm_type
self.G = None
cost_function = ExplorerCost(explorer_model, env_model, optimize_on, env_model.cached, heuristic_accelerate)
super(astarSolver, self).__init__(env_model, cost_function, viz)
# if using networkx-based implementation, set G
if algorithm_type == astarSolver.PY_NETWORKX or algorithm_type == astarSolver.CPP_NETWORKX:
self.G = GG(self)
# if we're using CPP external module
if algorithm_type == astarSolver.CPP_NETWORKX:
# create CPP object
self.path_finder = pextant_cpp.PathFinder()
# set kernel
kernel_list = self.env_model.searchKernel.getKernel().tolist()
self.path_finder.set_kernel(kernel_list)
# cache data
cached_costs = self.cost_function.cached["costs"]
if cached_costs is None:
cached_costs = self.cost_function.create_costs_cache()
cost_map = cached_costs["energy"].tolist()
self.path_finder.cache_costs(cost_map)
obstacle_map = self.env_model.obstacles.astype(int).tolist()
self.path_finder.cache_obstacles(obstacle_map)
def accelerate(self, weight=10):
self.cost_function = ExplorerCost(self.explorer_model, self.env_model, self.optimize_on,
self.cache, heuristic_accelerate=weight)
def solve(self, startpoint, endpoint):
if self.algorithm_type == astarSolver.CPP_NETWORKX:
solver = self.solvenx_cpp
elif self.algorithm_type == astarSolver.PY_NETWORKX:
solver = self.solvenx
else: # self.algorithm_type == astarSolver.PY_INHOUSE
solver = self.solveinhouse
return solver(startpoint, endpoint)
def solveinhouse(self, startpoint, endpoint):
env_model = self.env_model
if env_model.elt_hasdata(startpoint) and env_model.elt_hasdata(endpoint):
node1, node2 = MeshSearchElement(env_model.getMeshElement(startpoint)), \
MeshSearchElement(env_model.getMeshElement(endpoint))
solution_path, expanded_items = aStarSearch(node1, node2, self.cost_function, self.viz)
raw, nodes = solution_path
if len(raw) == 0:
coordinates = []
else:
coordinates = GeoPolygon(env_model.ROW_COL, *np.array(raw).transpose())
search = sextantSearch(raw, nodes, coordinates, expanded_items)
self.searches.append(search)
return search
else:
return False
def solvenx(self, startpoint, endpoint):
env_model = self.env_model
cost_function = self.cost_function
start = env_model.getMeshElement(startpoint).mesh_coordinate
target = env_model.getMeshElement(endpoint).mesh_coordinate
if env_model.elt_hasdata(startpoint) and env_model.elt_hasdata(endpoint):
            if self.G is None:
self.G = GG(self)
cost_function.setEndNode(MeshSearchElement(env_model.getMeshElement(endpoint)))
try:
raw = astar_path(self.G, start, target, lambda a, b: cost_function.getHeuristicCostRaw(a))
coordinates = GeoPolygon(self.env_model.COL_ROW, *np.array(raw).transpose()[::-1])
search = sextantSearch(raw, [], coordinates, [])
self.searches.append(search)
return search
except nx.NetworkXNoPath:
return False
else:
return False
def solvenx_cpp(self, startpoint, endpoint):
# reset any prior progress
self.path_finder.reset_progress()
# get source and target coordinates
source = self.env_model.getMeshElement(startpoint).mesh_coordinate # unscaled (row, column)
target = self.env_model.getMeshElement(endpoint).mesh_coordinate # unscaled (row, column)
# check that we have data at both start and end
if self.env_model.elt_hasdata(startpoint) and self.env_model.elt_hasdata(endpoint):
# cache heuristic
heuristics_map = self.cost_function.create_heuristic_cache(target).tolist()
self.path_finder.cache_heuristics(heuristics_map)
# perform search
raw = self.path_finder.astar_solve(source, target)
# if we have a good result
if len(raw) > 0:
# append result to 'searches' list and return
coordinates = GeoPolygon(self.env_model.COL_ROW, *np.array(raw).transpose()[::-1])
search = sextantSearch(raw, [], coordinates, [])
self.searches.append(search)
return search
# default to fail result
return False
def weight(self, a, b):
        selection = (np.array(a)
"""RGN model for 3D protein structure prediction.
The implicit ordering of tensor dimensions is:
[NUM_STEPS, BATCH_SIZE, NUM_DIHEDRALS, NUM_DIMENSIONS]
Tensors have this orientation unless otherwise labeled.
"""
import os
from glob import glob
from copy import deepcopy
from itertools import zip_longest
import numpy as np
import tensorflow as tf
import tensorflow.contrib.layers as layers
from tensorflow.python.ops import control_flow_ops
from geom_ops import *
from net_ops import *
from utils import *
import rnn
import transformer
# Public interface
SCOPE = 'RGN'
DUMMY_LOSS = -1.
LOSS_SCALING_FACTOR = 0.01 # this is to convert recorded losses to angstroms
class RGNModel(object):
"""Recurrent geometric network model
Attributes:
mode: train or predict
config: parameter dictionary
"""
# static variable to control creation of new objects and starting the model
_is_started = False
# ??? Should this be called model number not number of models?
_num_models = 0
def __init__(self, mode, config):
"""Sets up configurations and invokes the TF graph. """
# Make sure model hasn't been started.
if not RGNModel._is_started:
self.mode = mode
self.config = deepcopy(config)
# Set up public methods based on mode (for initial state).
if mode == 'training':
self.start = self._start
else:
self.evaluate = self._evaluate
self.predict = self._predict
# Process config for derived properties
io = self.config.io
arch = self.config.architecture
reg = self.config.regularization
curr = self.config.curriculum
opt = self.config.optimization
init = self.config.initialization
# Test for correct curriculum configuration
if curr['mode'] is None and curr['behavior'] is not None:
raise RuntimeError(
'Must set curriculum mode if curriculum behavior is set.')
elif curr['mode'] is not None and curr['behavior'] is None:
raise RuntimeError(
'Must set curriculum behavior if curriculum mode is set.')
# model name
if io['name'] is None:
io['name'] = 'model_' + str(RGNModel._num_models)
RGNModel._num_models = RGNModel._num_models + 1
# ??? what does this file contain?
# alphabet-related
arch['alphabet'] = np.loadtxt(io['alphabet_file'], delimiter = ',')[:, 6:] \
if io['alphabet_file'] is not None else None
# set alphabet size if implicit
if arch['alphabet'] is not None:
arch['alphabet_size'] = len(arch['alphabet'])
# having multiple alphabets is isomorphic to not reusing alphabet
arch['single_or_no_alphabet'] = type(arch['alphabet_size']) is not list
arch['is_alphabetized'] = 'alphabet' in arch['tertiary_output']
# angularization
arch['is_angularized'] = 'angular' in arch['tertiary_output']
# optimization
if opt['optimizer'] == 'adadelta':
opt.update({'rho': opt['decay']})
# initialization
if arch['higher_order_layers']:
for key in ['recurrent_init']:
if type(init[key]) is not list:
init[key] = [init[key]] * len(arch['recurrent_layer_size'])
if arch['recurrent_nonlinear_out_proj_size'] is not None:
for key in ['recurrent_nonlinear_out_proj_init']:
if type(init[key]) is not list:
init[key] = [init[key]] * len(arch['recurrent_nonlinear_out_proj_size'])
# regularization
for key in ['recurrent_input_keep_probability',
'recurrent_output_keep_probability',
'recurrent_keep_probability',
'recurrent_state_zonein_probability',
'recurrent_memory_zonein_probability',
'alphabet_keep_probability',
'alphabet_normalization']:
if type(reg[key]) is not list:
reg[key] = [reg[key]] * len(arch['recurrent_layer_size'])
# create graph
self._create_graph(mode, self.config)
else:
raise RuntimeError('Model already started; cannot create new objects.')
def _create_graph(self, mode, config):
"""Creates TensorFlow computation graph depending on the mode.
Builds the head (training) graph to start, train, and checkpoint a model.
Or create any number of 'evaluation' models that depend on the head model,
but with additional data sets, different model semantics (e.g. no dropout)
for the evaluation, and logging of their performance.
Two types of internal variables stored in each object:
ops collections, like training_ops, evaluation_ops, etc.
As the graph is built up, ops are added to these lists.
various nodes that are like TF methods, like the initializer, saver, etc,
which are stored in the object and are accessed by various methods when necessary.
Args:
mode: training or predicting
config: dictionary of configuration parameters
"""
# set up appropriate op collections based on mode (for initial state)
if mode == 'training':
# collection of ops to be run at each step of training
self._training_ops = training_ops = {}
# collection of ops for diagnostics like weight norms and curriculum quantiles
self._diagnostic_ops = diagnostic_ops = {}
else:
# collection of ops for evaluation of losses
self._evaluation_ops = evaluation_ops = {}
# collection of ops for the last evaluation in a multi-invocation evaluation
self._last_evaluation_ops = last_evaluation_ops = {}
# collection of ops for prediction of structures
self._prediction_ops = prediction_ops = {}
# set variable scoping, op scoping, and place on appropriate device
with tf.variable_scope(SCOPE, reuse=(mode == 'evaluation')) as scope, \
tf.name_scope(SCOPE + '/' + config.io['name'] + '/'), \
tf.device(_device_function_constructor(
**{k: config.computing[k] for k in ('functions_on_devices', 'default_device')})):
# set graph seed
if mode == 'training':
tf.set_random_seed(config.initialization['graph_seed'])
# Create curriculum state and tracking variables if needed.
if config.curriculum['mode'] is not None:
# Variable to hold current curriculum iteration
curriculum_step = tf.get_variable(
name='curriculum_step',
shape=[],
trainable=False,
initializer=tf.constant_initializer(config.curriculum['base']))
if mode == 'training':
diagnostic_ops.update({'curriculum_step': curriculum_step})
# Set up data ports
if mode == 'training':
self._coordinator = tf.train.Coordinator()
if config.curriculum['mode'] == 'length':
max_length = tf.reduce_min(
[curriculum_step, config.optimization['num_steps']])
max_length = tf.cast(max_length, tf.int32)
else:
max_length = config.optimization['num_steps']
dataflow_config = merge_dicts(
config.io,
config.initialization,
config.optimization,
config.queueing)
ids, primaries, evolutionaries, secondaries, tertiaries, \
masks, num_stepss = _dataflow(dataflow_config, max_length)
# Set up inputs
inputs = _inputs(
merge_dicts(config.architecture, config.initialization),
primaries,
evolutionaries)
# Compute dRMSD weights
# Masks out meaningless (longer than sequence) pairwise distances
# Incorporates curriculum weights
weights_config = merge_dicts(
config.optimization,
config.curriculum,
config.loss,
config.io)
weights, flat_curriculum_weights = _weights(
weights_config,
masks,
curriculum_step if config.curriculum['mode'] == 'loss' else None)
if mode == 'training' and config.curriculum['mode'] == 'loss':
diagnostic_ops.update({'flat_curriculum_weights': flat_curriculum_weights})
# create alphabet if needed and if it will be shared between layers,
# otherwise set to None so that _dihedrals takes care of it
alphabet_config = merge_dicts(
config.architecture,
config.initialization)
if alphabet_config['is_alphabetized'] \
and alphabet_config['single_or_no_alphabet']:
alphabet = _alphabet(mode, alphabet_config)
if mode == 'training' and config.io['log_alphabet']:
diagnostic_ops.update({'alphabet': alphabet})
else:
alphabet = None
for case in switch(config.architecture['internal_representation']):
if case('transformer'):
transformer_config = merge_dicts(
config.initialization,
config.architecture,
config.regularization,
config.optimization)
inputs2 = tf.transpose(inputs, perm=[1,0,2])
recurrent_outputs = transformer._encoder_model(
inputs2,
transformer_config,
mode
)
recurrent_outputs = tf.transpose(
recurrent_outputs,
perm=[1,0,2])
elif case('recurrent'):
# Create recurrent layer(s) that translate
# primary sequences into internal representation
recurrence_config = merge_dicts(
config.initialization,
config.architecture,
config.regularization,
config.optimization,
config.computing, config.io)
# inputs: [NUM_STEPS, BATCH_SIZE, RECURRENT_LAYER_SIZE]
# recurrent_outputs: [NUM_STEPS, BATCH_SIZE, RECURRENT_LAYER_SIZE]
recurrent_outputs, recurrent_states = rnn._higher_recurrence(
mode,
recurrence_config,
inputs,
num_stepss,
alphabet=alphabet)
elif case('none'):
recurrent_outputs = inputs
else:
raise ValueError('Not an available internal representation.')
# Tertiary structure generation
if config.loss['tertiary_weight'] > 0:
# Convert internal representation to
# (thru some number of possible ways) dihedral angles
dihedrals_config = merge_dicts(
config.initialization,
config.optimization,
config.architecture,
config.regularization,
config.io)
dihedrals_config.update({
k: dihedrals_config[k][-1] for k in [
'alphabet_keep_probability',
'alphabet_normalization']})
if not dihedrals_config['single_or_no_alphabet']:
dihedrals_config.update({
'alphabet_size': dihedrals_config['alphabet_size'][-1]})
dihedrals = _dihedrals(
mode,
dihedrals_config,
recurrent_outputs,
alphabet=alphabet)
# Convert dihedrals into full 3D structures and compute dRMSDs
coordinates = _coordinates(
merge_dicts(
config.computing,
config.optimization,
config.queueing),
dihedrals)
drmsds = _drmsds(
merge_dicts(
config.optimization,
config.loss,
config.io),
coordinates,
tertiaries,
weights)
if mode == 'evaluation':
prediction_ops.update({
'ids': ids,
'coordinates': coordinates,
'num_stepss': num_stepss,})
# 'recurrent_states': recurrent_states})
# Losses
if config.loss['include']:
filters = {grp: id_filter(ids, grp) \
for grp in config.io['evaluation_sub_groups']} \
if mode == 'evaluation' else {}
filters.update({'all': tf.tile([True], tf.shape(ids))})
for group_id, group_filter in filters.items():
with tf.variable_scope(group_id):
# Tertiary loss
effective_tertiary_loss = 0.
if config.loss['tertiary_weight'] > 0:
if config.queueing['num_evaluation_invocations'] > 1 \
and mode == 'training':
raise RuntimeError('Cannot use multiple invocations with training mode.')
else:
# Compute tertiary loss quotient parts by reducing dRMSDs
# based on normalization behavior
tertiary_loss_numerator, tertiary_loss_denominator = _reduce_loss_quotient(
merge_dicts(config.loss, config.io, config.optimization),
drmsds,
masks,
group_filter,
name_prefix='tertiary_loss')
# Handles multiple invocations and gracefully degrades for single invocations.
# Variables are created below _per_ evaluation model, which is a deviation from my general design
# the scope of those variables is the evaluation model's, _not_ the training model's as usual
tertiary_loss, min_loss_achieved, min_loss_op, \
update_accu_op, reduce_accu_op = _accumulate_loss(
merge_dicts(config.io, config.queueing),
tertiary_loss_numerator,
tertiary_loss_denominator,
name_prefix='tertiary_loss')
if mode == 'evaluation':
evaluation_ops.update({
'update_accumulator_' + group_id + '_op': update_accu_op})
last_evaluation_ops.update({
'tertiary_loss_' + group_id: tertiary_loss * LOSS_SCALING_FACTOR,
'reduce_accumulator_' + group_id + '_op': reduce_accu_op,
'min_tertiary_loss_achieved_' + group_id: min_loss_achieved * LOSS_SCALING_FACTOR,
'min_tertiary_loss_' + group_id + '_op': min_loss_op})
if config.io['log_model_summaries']:
tf.add_to_collection(
config.io['name'] + '_tertiary_losses',
tertiary_loss)
effective_tertiary_loss = config.loss['tertiary_weight'] * tertiary_loss
# Final loss and related housekeeping
loss = tf.identity(effective_tertiary_loss, name='loss')
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) # batch_norm related
if update_ops:
loss = control_flow_ops.with_dependencies(
tf.tuple(update_ops),
loss)
if config.io['log_model_summaries']:
tf.add_to_collection(
config.io['name'] + '_losses',
loss)
if group_id == config.curriculum['loss_history_subgroup']:
curriculum_loss = loss
# Curriculum loss history; not always used but design
# is much cleaner if always created.
curriculum_loss_history = tf.get_variable(
initializer=tf.constant_initializer([DUMMY_LOSS] \
* config.curriculum['change_num_iterations']),
shape=[config.curriculum['change_num_iterations']],
trainable=False,
name='curriculum_loss_history')
if mode == 'evaluation' and config.curriculum['update_loss_history']:
update_curriculum_history_op = _history(
config.io,
curriculum_loss,
curriculum_loss_history)
last_evaluation_ops.update({
'update_curriculum_history_op': update_curriculum_history_op})
# Training
if mode == 'training':
# get grads, training ops
self._global_step, minimize_op, grads_and_vars_dict = _training(
config.optimization, loss)
self._grads_and_vars_length = len(grads_and_vars_dict) // 2
# update relevant op dicts
# update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
# if update_ops:
# training_ops.update({'update_ops': tf.tuple(update_ops)})
training_ops.update({
'minimize_op': minimize_op,
'global_step': self._global_step,
'ids': ids})
diagnostic_ops.update(grads_and_vars_dict)
# Curriculum
if mode == 'training' \
and config.curriculum['behavior'] in [
'fixed_rate',
'loss_threshold',
'loss_change']:
curriculum_update_op = _curriculum(
config.curriculum,
curriculum_step,
curriculum_loss_history,
[minimize_op])
training_ops.update({
'curriculum_update_op': curriculum_update_op})
def _train(self, session):
"""Performs one iteration of training.
If applicable, advances the curriculum.
Args:
session: tf session
Returns:
global step
ids
"""
training_dict = ops_to_dict(session, self._training_ops)
return training_dict['global_step'], training_dict['ids']
def _evaluate(self, session, pretty=True):
"""Evaluates loss(es) and returns dicts with the relevant loss(es).
Args:
session: tf session
pretty: pretty print
Returns:
evaluation dict
"""
if RGNModel._is_started:
# evaluate
num_invocations = self.config.queueing['num_evaluation_invocations']
for invocation in range(num_invocations):
if invocation < num_invocations - 1:
evaluation_dict = ops_to_dict(
session,
self._evaluation_ops)
else:
evaluation_dict = ops_to_dict(
session,
merge_dicts(
self._evaluation_ops,
self._last_evaluation_ops))
# write event summaries to disk
if self.config.io['log_model_summaries']:
self._summary_writer.add_summary(
evaluation_dict['merged_summaries_op'],
global_step=evaluation_dict['global_step'])
# remove non-user facing ops
if pretty:
[evaluation_dict.pop(k) for k in list(evaluation_dict.keys()) if 'op' in k]
return evaluation_dict
else:
raise RuntimeError('Model has not been started or has already finished.')
def _predict(self, session):
"""Predict 3D structures.
Args:
session: tf session
Returns:
dict of prediction
"""
if RGNModel._is_started:
# evaluate prediction dict
prediction_dict = ops_to_dict(session, self._prediction_ops)
# process tertiary sequences
if 'coordinates' in prediction_dict:
prediction_dict['coordinates'] = np.transpose(
prediction_dict['coordinates'], (1, 2, 0))
# generate return dict
predictions = {}
for id_, num_steps, tertiary, recurrent_states \
in zip_longest(*[prediction_dict.get(key, []) \
for key in ['ids', 'num_stepss', 'coordinates', 'recurrent_states']]):
prediction = {}
if tertiary is not None:
last_atom = (num_steps - self.config.io['num_edge_residues']) * NUM_DIHEDRALS
prediction.update({'tertiary': tertiary[:, :last_atom]})
prediction.update({'recurrent_states': recurrent_states})
predictions.update({id_: prediction})
return predictions
else:
raise RuntimeError('Model has not been started or has already finished.')
def _diagnose(self, session, pretty=True):
""" Compute diagnostic measurements
Ex. weight norms and curriculum quantiles.
Args:
session: tf session
pretty: pretty print
Returns:
diagnostic dict
"""
# for k, v in self._diagnostic_ops.items():
# print("KEY: ", k, " VALUE: ", v)
diagnostic_dict = ops_to_dict(session, self._diagnostic_ops)
# write event summaries to disk
if self.config.io['log_model_summaries']:
for op in ['merged_summaries_op', 'base_merged_summaries_op']:
self._summary_writer.add_summary(
diagnostic_dict[op],
global_step=diagnostic_dict['global_step'])
# compute max/min of vars and grads
vars_ = [diagnostic_dict['v' + str(i)] \
for i in range(self._grads_and_vars_length)]
grads = [diagnostic_dict['g' + str(i)] \
for i in range(self._grads_and_vars_length)]
diagnostic_dict.update({
'min_weight': np.min([np.min(var) for var in vars_]),
'max_weight': np.max([np.max(var) for var in vars_]),
            'min_grad': np.min([np.min(grad) for grad in grads]),
# python GenerateImages.py [-h] -i INPUT -o OUTPUT
# Import all the needed modules
from torchvision import transforms
from scipy.stats import norm
from PIL import Image
import numpy as np
import argparse
import decimal
import random
import glob
import cv2
# Helper function to generate a mask for parallel light method
def generate_parallel_light_mask(mask_size,
max_brightness=255,
min_brightness=0,
mode="gaussian"):
pos_x = random.randint(0, mask_size[0])
pos_y = random.randint(0, mask_size[1])
direction = random.randint(0, 360)
padding = int(max(mask_size) * np.sqrt(2))
canvas_x = padding * 2 + mask_size[0]
canvas_y = padding * 2 + mask_size[1]
mask = np.zeros(shape=(canvas_y, canvas_x), dtype=np.float32)
init_mask_ul = (int(padding), int(padding))
init_mask_br = (int(padding+mask_size[0]), int(padding+mask_size[1]))
init_light_pos = (padding + pos_x, padding + pos_y)
for i in range(canvas_y):
i_value = _decayed_value_in_norm(i, max_brightness, min_brightness, init_light_pos[1], mask_size[1])
mask[i] = i_value
rotate_M = cv2.getRotationMatrix2D(init_light_pos, direction, 1)
mask = cv2.warpAffine(mask, rotate_M, (canvas_x, canvas_y))
mask = mask[init_mask_ul[1]:init_mask_br[1], init_mask_ul[0]:init_mask_br[0]]
mask = np.asarray(mask, dtype=np.uint8)
mask = cv2.medianBlur(mask, 9)
mask = 255 - mask
return mask
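# A usage sketch (hypothetical file name; how the mask is blended into an image is an
# assumption -- the function itself only returns a uint8 brightness map of the frame size):
#   frame = cv2.imread("input.png")
#   mask = generate_parallel_light_mask((frame.shape[1], frame.shape[0]))
#   hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
#   hsv[:, :, 2] = np.clip(hsv[:, :, 2] * (mask / 255.0), 0, 255).astype(np.uint8)
#   lit = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)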
# Helper function for parallel light method
def _decayed_value_in_norm(x, max_value, min_value, center, range):
radius = range / 3
center_prob = norm.pdf(center, center, radius)
x_prob = norm.pdf(x, center, radius)
x_value = (x_prob / center_prob) * (max_value - min_value) + min_value
return x_value
# Helper function for parallel light method
def _decayed_value_in_linear(x, max_value, padding_center, decay_rate):
x_value = max_value - abs(padding_center - x) * decay_rate
if x_value < 0:
x_value = 1
return x_value
# Helper function to generate a mask for the spot light method
def generate_spot_light_mask(mask_size,
max_brightness = 255,
min_brightness = 0,
mode = "gaussian",
speedup = False):
position = [(random.randint(0, mask_size[0]), random.randint(0, mask_size[1]))]
mask = np.zeros(shape=(mask_size[1], mask_size[0]), dtype=np.float32)
mu = np.sqrt(mask.shape[0]**2+mask.shape[1]**2)
dev = mu / 3.5
mask = _decay_value_radically_norm_in_matrix(mask_size, position, max_brightness, min_brightness, dev)
mask = np.asarray(mask, dtype=np.uint8)
mask = cv2.medianBlur(mask, 5)
mask = 255 - mask
return mask
# Helper function for the spot light method
def _decay_value_radically_norm_in_matrix(mask_size, centers, max_value, min_value, dev):
center_prob = norm.pdf(0, 0, dev)
x_value_rate = np.zeros((mask_size[1], mask_size[0]))
for center in centers:
coord_x = np.arange(mask_size[0])
coord_y = np.arange(mask_size[1])
xv, yv = np.meshgrid(coord_x, coord_y)
dist_x = xv - center[0]
dist_y = yv - center[1]
dist = np.sqrt(np.power(dist_x, 2) + np.power(dist_y, 2))
x_value_rate += norm.pdf(dist, 0, dev) / center_prob
mask = x_value_rate * (max_value - min_value) + min_value
mask[mask > 255] = 255
return mask
# Helper function for the spot light method
def _decay_value_radically_norm(x, centers, max_value, min_value, dev):
center_prob = norm.pdf(0, 0, dev)
x_value_rate = 0
for center in centers:
distance = np.sqrt((center[0]-x[0])**2 + (center[1]-x[1])**2)
x_value_rate += norm.pdf(distance, 0, dev) / center_prob
x_value = x_value_rate * (max_value - min_value) + min_value
x_value = 255 if x_value > 255 else x_value
return x_value
# Allowing users to give input as command line arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input", required=True,
help="path to the folder containing images")
ap.add_argument("-o", "--output", required=True,
help="path to output folder for storing augmented images")
args = vars(ap.parse_args())
# Reading all images for a given folder
path = args["input"]
ext = ['png', 'jpg'] # Add image formats here
files = []
[files.extend(glob.glob(path + '*.' + e)) for e in ext]
images = [cv2.imread(file) for file in files]
# Starting with augmentation
output = args["output"]
i = 1
for image in images:
# Augmentation by flipping images
flip = cv2.flip(image, 0) # Flip an image vertically
savePath = output + str(i) + ".png"
i += 1
cv2.imwrite(savePath, flip)
flip = cv2.flip(image, 1) # Flip an image horizontally
savePath = output + str(i) + ".png"
i += 1
cv2.imwrite(savePath, flip)
flip = cv2.flip(image, -1) # Flip an image both vertically and horizontally
savePath = output + str(i) + ".png"
i += 1
cv2.imwrite(savePath, flip)
    # Changing brightness of an image (gamma correction with a random gamma)
for num in range (0, 5):
gamma = float(decimal.Decimal(random.randrange(10, 1000))/100)
invGamma = 1.0 / gamma
table = np.array([((i / 255.0) ** invGamma) * 255
for i in np.arange(0, 256)]).astype("uint8")
bright = cv2.LUT(image, table)
savePath = output + str(i) + ".png"
i += 1
cv2.imwrite(savePath, bright)
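    # The lookup table above applies out = 255 * (in / 255) ** (1 / gamma); for example,
    # with gamma = 2.0 a mid-grey input of 128 maps to about 255 * 0.502 ** 0.5 ~ 181
    # (brightening), while gamma < 1 darkens the image.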
# Changing image to black and white
r,g,b = image[:,:,0], image[:,:,1], image[:,:,2]
gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
savePath = output + str(i) + ".png"
i += 1
cv2.imwrite(savePath, gray)
# Changing contrast of the image
lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
l, a, b = cv2.split(lab)
for num in range(0, 5):
value = float(decimal.Decimal(random.randrange(10, 1000))/100)
clahe = cv2.createCLAHE(clipLimit=value, tileGridSize=(8,8))
cl = clahe.apply(l)
limg = cv2.merge((cl,a,b))
final = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)
savePath = output + str(i) + ".png"
i += 1
cv2.imwrite(savePath, final)
# Augmentation using cropping
crops = []
(h, w) = image.shape[:2]
width = w - 150
height = h - 100
coords = [
[0, 0, width, height],
[w - width, 0, w, height],
[w - width, h - height, w, h],
[0, h - height, width, h]]
dW = int(0.5 * (w - width))
dH = int(0.5 * (h - height))
coords.append([dW, dH, w - dW, h - dH])
for (startX, startY, endX, endY) in coords:
crop = image[startY:endY, startX:endX]
crop = cv2.resize(crop, (width, height), interpolation=cv2.INTER_AREA)
crops.append(crop)
for c in crops:
savePath = output + str(i) + ".png"
i += 1
cv2.imwrite(savePath, c)
# Average blurring
blur = cv2.blur(image,(5,5))
savePath = output + str(i) + ".png"
i += 1
cv2.imwrite(savePath, blur)
# Gaussian blur
blur = cv2.GaussianBlur(image,(5,5),0)
savePath = output + str(i) + ".png"
i += 1
cv2.imwrite(savePath, blur)
# Median Blur
median = cv2.medianBlur(image,5)
savePath = output + str(i) + ".png"
i += 1
cv2.imwrite(savePath, median)
# Bilateral Filtering
blur = cv2.bilateralFilter(image,9,75,75)
savePath = output + str(i) + ".png"
i += 1
cv2.imwrite(savePath, blur)
# Adding padding to image
padded = cv2.copyMakeBorder(image, 20, 20, 20, 20, cv2.BORDER_CONSTANT)
savePath = output + str(i) + ".png"
i += 1
cv2.imwrite(savePath, padded)
# Translation
num_rows, num_cols = image.shape[:2]
translation_matrix = np.float32([ [1,0,70], [0,1,110]])
dst = cv2.warpAffine(image, translation_matrix, (num_cols, num_rows))
savePath = output + str(i) + ".png"
i += 1
cv2.imwrite(savePath, dst)
    # Translation with less cropping
dst = cv2.warpAffine(image, translation_matrix, (num_cols + 90, num_rows + 150))
savePath = output + str(i) + ".png"
i += 1
cv2.imwrite(savePath, dst)
    # Translation with the image in the middle of a bigger frame
dst = cv2.warpAffine(image, translation_matrix, (num_cols + 70, num_rows + 110))
translation_matrix = np.float32([ [1,0,-30], [0,1,-50] ])
dst = cv2.warpAffine(dst, translation_matrix, (num_cols + 70 + 30, num_rows + 110 + 50))
savePath = output + str(i) + ".png"
i += 1
cv2.imwrite(savePath, dst)
# Histogram Equalization
B, G, R = cv2.split(image)
B = cv2.equalizeHist(B)
G = cv2.equalizeHist(G)
R = cv2.equalizeHist(R)
equalized = cv2.merge((B, G, R))
savePath = output + str(i) + ".png"
i += 1
cv2.imwrite(savePath, equalized)
#CLAHE - Contrast Limited Adaptive Histogram Equalization
B, G, R = cv2.split(image)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
cl1 = clahe.apply(B)
cl2 = clahe.apply(G)
cl3 = clahe.apply(R)
claheImage = cv2.merge((B, G, R))
savePath = output + str(i) + ".png"
i += 1
cv2.imwrite(savePath, claheImage)
# Saturation
img = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)) # Convert openCV image to PIL image
loader_transform = transforms.ColorJitter(saturation=1)
imgOut = loader_transform(img)
savePath = output + str(i) + ".png"
i += 1
numpy_image = np.array(imgOut) # converting PIL image back to openCV
imgOut=cv2.cvtColor(numpy_image, cv2.COLOR_RGB2BGR) # the color is converted from RGB to BGR format
cv2.imwrite(savePath, imgOut)
# Hue
img = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)) # Convert openCV image to PIL image
loader_transform = transforms.ColorJitter(hue=0.2)
imgOut = loader_transform(img)
savePath = output + str(i) + ".png"
i += 1
    numpy_image = np.array(imgOut)  # converting PIL image back to an OpenCV array
# -*- coding:utf-8 -*-
import argparse
import codecs
import os
import random
import sys
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.callbacks import ModelCheckpoint, TensorBoard
from modeling_bert import BertForSequenceClassification
from tensorflow.python.keras.models import load_model
from sklearn.metrics import classification_report, confusion_matrix
from finetune.tokenization_bert import BertTokenizer
from finetune.dataset import ChnSentiCorpDataset
import time
from datetime import datetime
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def set_random():
# seed
random.seed(42)
np.random.seed(42)
tf.set_random_seed(42)
TASK_NAMES = {
'chnsenticorp': ChnSentiCorpDataset
}
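# The `opts` namespace consumed by train()/test() below is assumed to be built by an
# argparse section elsewhere in this script; the attributes actually read are:
#   opts.task_name, opts.data_dir, opts.pretrained_path, opts.max_seq_len,
#   opts.num_labels (set inside train), opts.lr, opts.batch_size, opts.epochs,
#   opts.save_dir, opts.log_dir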
def train(opts):
tokenizer = BertTokenizer.from_pretrained(opts.pretrained_path)
# get dataset
dataset = TASK_NAMES[opts.task_name](opts.data_dir, tokenizer, opts.max_seq_len)
X_train, y_train = dataset.get_train_datasets()
X_dev, y_dev = dataset.get_dev_datasets()
opts.num_labels = len(dataset.get_labels())
# build model
optimizer = tf.keras.optimizers.Adam(lr=opts.lr, epsilon=1e-08)
model = BertForSequenceClassification().build(opts)
model.compile(
optimizer=optimizer,
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)
model.summary()
# callbacks: save model
filepath = os.path.join(opts.save_dir, "{epoch:02d}-{val_acc:.4f}.hdf5")
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=False, mode='max')
# callbacks: tensorboard
tensorboard_dir = os.path.join(opts.log_dir, datetime.now().strftime("%Y%m%d-%H%M"))
tensorboard = TensorBoard(log_dir=tensorboard_dir)
model.fit(X_train, y_train,
batch_size=opts.batch_size,
epochs=opts.epochs,
validation_data=(X_dev, y_dev),
shuffle=True,
callbacks=[checkpoint, tensorboard])
X_test, y_test = dataset.get_test_datasets()
score, acc = model.evaluate(X_test, y_test, batch_size=opts.batch_size)
print('test score:', score)
print('test accuracy:', acc)
def test(opts):
tokenizer = BertTokenizer.from_pretrained(opts.pretrained_path)
dataset = TASK_NAMES[opts.task_name](opts.data_dir, tokenizer, opts.max_seq_len)
X_test, y_test = dataset.get_test_datasets()
    # load the saved model (pass custom_objects here if custom layers are used)
model = load_model(opts.save_dir)
start_time = time.time()
y_pred = model.predict(X_test, batch_size=opts.batch_size)
    y_pred = np.argmax(y_pred, axis=1)
import numpy as np
import numpy.testing as npt
from cgtools import vector as V
def test_inv_3x4():
Xs = np.random.random((1000, 3, 4))
Xs_inv_np = np.array([np.linalg.inv(V.to_4x4(Xi)) for Xi in Xs])
Xs_inv_ours = V.inv_3x4(Xs)
assert np.allclose(Xs_inv_np[:, 3, :], np.array([0, 0, 0, 1])[np.newaxis])
npt.assert_allclose(Xs_inv_ours, Xs_inv_np[:, :3, :])
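# Note: V.hom(v) is assumed to append a homogeneous 1-coordinate and V.dehom(p) to
# divide by the last coordinate, so a (d+1)x(d+1) matrix M acts on a vector v as
# dehom(M @ hom(v)); that is the reference computation used in the tests below.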
def test_transform_many_matrices_many_vectors():
for dim in [2, 3, 4]:
# no translation:
vectors = np.random.random((1000, dim))
xforms = np.random.random(((1000, dim, dim)))
reference = np.array([
M.dot(v)
for v, M in zip(vectors, xforms)
])
result = V.transform(vectors, xforms)
assert result.shape == vectors.shape
npt.assert_allclose(reference, result)
# with translation, no perspective (e.g. the common 3x4 matrices)
vectors = np.random.random((1000, dim))
xforms = np.random.random(((1000, dim, dim + 1)))
reference = np.array([
M.dot(V.hom(v))
for v, M in zip(vectors, xforms)
])
result = V.transform(vectors, xforms)
assert result.shape == vectors.shape
npt.assert_allclose(reference, result)
# with translation, no perspective
vectors = np.random.random((1000, dim))
xforms = np.random.random(((1000, dim + 1, dim + 1)))
reference = np.array([
V.dehom(M.dot(V.hom(v)))
for v, M in zip(vectors, xforms)
])
result = V.transform(vectors, xforms)
assert result.shape == vectors.shape
npt.assert_allclose(reference, result)
def test_transform_one_matrix_many_vectors():
for dim in [2, 3, 4]:
# no translation:
vectors = np.random.random((1000, dim))
M = np.random.random(((dim, dim)))
reference = np.array([M.dot(v) for v in vectors])
result = V.transform(vectors, M)
assert result.shape == vectors.shape
npt.assert_allclose(reference, result)
# with translation, no perspective (e.g. the common 3x4 matrices)
        vectors = np.random.random((1000, dim))
# additional tests of the extended XYZ file I/O
# (which is also included in oi.py test case)
# maintained by <NAME> <<EMAIL>>
import os
from pathlib import Path
import numpy as np
import pytest
from extxyz.extxyz import read, write
# from ase.io.extxyz import key_val_str_to_dict, key_val_dict_to_str
# import ase.io
# from ase.io import extxyz
from ase.atoms import Atoms
from ase.build import bulk
# from ase.io.extxyz import escape
# from ase.calculators.calculator import compare_atoms
from ase.calculators.emt import EMT
# from ase.constraints import FixAtoms, FixCartesian
from ase.constraints import full_3x3_to_voigt_6_stress
from ase.build import molecule
# array data of shape (N, 1) squeezed down to shape (N, ) -- bug fixed
# in commit r4541
@pytest.fixture
def at():
return bulk('Si')
@pytest.fixture
def images(at):
images = [at, at * (2, 1, 1), at * (3, 1, 1)]
images[1].set_pbc([True, True, False])
images[2].set_pbc([True, False, False])
return images
def write_ats(filename, ats, vec_cell=False):
with open(filename, 'w') as fout:
for at in ats:
fout.write(f'{len(at)}\n')
if not vec_cell:
fout.write('Lattice="{} {} {} {} {} {} {} {} {}" Properties=species:S:1:pos:R:3 pbc=[{}, {}, {}]\n'.format(
*at.cell[0,:], *at.cell[1,:], *at.cell[2,:], *at.pbc))
else:
fout.write('\n')
for s, p in zip(at.symbols, at.positions):
fout.write('{} {} {} {}\n'.format(s, *p))
if vec_cell:
if at.pbc[0]:
fout.write('VEC1 {} {} {}\n'.format(*at.cell[0,:]))
if at.pbc[1]:
fout.write('VEC2 {} {} {}\n'.format(*at.cell[1,:]))
if at.pbc[2]:
fout.write('VEC3 {} {} {}\n'.format(*at.cell[2,:]))
# write sequence of images with different numbers of atoms
def test_sequence(tmp_path, images, helpers):
write_ats(tmp_path / 'multi.xyz', images)
for read_images in helpers.read_all_variants(tmp_path / 'multi.xyz'):
assert read_images == images
### no support for vec_cell
##def test_vec_cell(at, images):
## ase.io.write('multi.xyz', images, vec_cell=True)
## cell = images[1].get_cell()
## cell[-1] = [0.0, 0.0, 0.0]
## images[1].set_cell(cell)
## cell = images[2].get_cell()
## cell[-1] = [0.0, 0.0, 0.0]
## cell[-2] = [0.0, 0.0, 0.0]
## images[2].set_cell(cell)
## read_images = ase.io.read('multi.xyz', index=':')
## assert read_images == images
## # also test for vec_cell with whitespaces
## Path('structure.xyz').write_text("""1
## Coordinates
## C -7.28250 4.71303 -3.82016
## VEC1 1.0 0.1 1.1
## 1
##
## C -7.28250 4.71303 -3.82016
## VEC1 1.0 0.1 1.1
## """)
##
## a = ase.io.read('structure.xyz', index=0)
## b = ase.io.read('structure.xyz', index=1)
## assert a == b
##
## # read xyz containing trailing blank line
## # also test for upper case elements
## Path('structure.xyz').write_text("""4
## Coordinates
## MG -4.25650 3.79180 -2.54123
## C -1.15405 2.86652 -1.26699
## C -5.53758 3.70936 0.63504
## C -7.28250 4.71303 -3.82016
##
## """)
##
## a = ase.io.read('structure.xyz')
## assert a[0].symbol == 'Mg'
# read xyz with / and @ signs in key value
def test_read_slash(tmp_path, helpers):
(tmp_path / 'slash.xyz').write_text("""4
key1=a key2=a/b key3=a@b key4="a@b"
Mg -4.25650 3.79180 -2.54123
C -1.15405 2.86652 -1.26699
C -5.53758 3.70936 0.63504
C -7.28250 4.71303 -3.82016
""")
for a in helpers.read_all_variants(tmp_path / 'slash.xyz'):
assert a.info['key1'] == r'a'
assert a.info['key2'] == r'a/b'
assert a.info['key3'] == r'a@b'
assert a.info['key4'] == r'a@b'
def test_write_struct(tmp_path, helpers):
struct = Atoms(
'H4', pbc=[True, True, True],
cell=[[4.00759, 0.0, 0.0],
[-2.003795, 3.47067475, 0.0],
[3.06349683e-16, 5.30613216e-16, 5.00307]],
positions=[[-2.003795e-05, 2.31379473, 0.875437189],
[2.00381504, 1.15688001, 4.12763281],
[2.00381504, 1.15688001, 3.37697219],
[-2.003795e-05, 2.31379473, 1.62609781]],
)
struct.info = {'dataset': 'deltatest', 'kpoints': np.array([28, 28, 20]),
'identifier': 'deltatest_H_1.00',
'unique_id': '4cf83e2f89c795fb7eaf9662e77542c1'}
for fn in helpers.write_all_variants(tmp_path / 'tmp.xyz', struct):
assert helpers.approx_equal(read(fn), struct)
# Complex properties line. Keys and values that break with a regex parser.
# see https://gitlab.com/ase/ase/issues/53 for more info
def test_complex_key_val(tmp_path, helpers):
complex_xyz_string = (
' ' # start with a separator
'str=astring '
'quot="quoted value" '
'quote_special="a_to_Z_$%%^&*" '
r'escaped_quote="esc\"aped" '
#NB 'true_value ' bare key no longer valid
'false_value = F '
'integer=22 '
'floating=1.1 '
'int_array={1 2 3} '
'float_array="3.3 4.4" '
'virial="1 4 7 2 5 8 3 6 9" ' # special 3x3, fortran ordering
#NB TEMPORARY 'not_a_3x3_array="1 4 7 2 5 8 3 6 9" ' # should be left as a 9-vector
'Lattice=" 4.3 0.0 0.0 0.0 3.3 0.0 0.0 0.0 7.0 " ' # spaces in arr
'scientific_float=1.2e7 '
'scientific_float_2=5e-6 '
'scientific_float_array="1.2 2.2e3 4e1 3.3e-1 2e-2" '
'not_array="1.2 3.4 text" '
'bool_array={T F T F} '
'bool_array_2=" T, F, T " ' # leading spaces
'not_bool_array=[T F S] '
# read and write
# '\xfcnicode_key=val\xfce ' # fails on AppVeyor
'unquoted_special_value=a_to_Z_$%%^&* '
'2body=33.3 '
#NB 'hyphen-ated ' bare key no longer valid, but trying hyphenated key with value instead
'hyphen-ated=value '
# parse only
'many_other_quotes="4 8 12" '
'comma_separated="7, 4, -1" '
'bool_array_commas=[T, T, F, T] '
'Properties=species:S:1:pos:R:3 '
#NB 'multiple_separators ' bare keyword no longer valid, try with a value instead
'multiple_separators=val '
#NB 'double_equals=abc=xyz ' no longer allow bare = in value, try with quotes instead
'double_equals="abc=xyz" '
#NB 'trailing ' bare keyword no longer valid
'"with space"="a value" '
#NB r'space\"="a value" ' cannot backslash-escape quotes in bare string, try quoted instead
r'"space\""="a value" '
# tests of JSON functionality
'f_str_looks_like_array="[[1, 2, 3], [4, 5, 6]]" '
#NB 'f_float_array="_JSON [[1.5, 2, 3], [4, 5, 6]]" ' no _JSON support yet
#NB 'f_int_array="_JSON [[1, 2], [3, 4]]" ' no _JSON support yet
#NB 'f_bool_bare ' bare key no longer valid
'f_bool_value=F '
#NB 'f_dict={_JSON {"a" : 1}} ' no _JSON support yet
)
expected_dict = {
'str': 'astring',
'quot': "quoted value",
'quote_special': u"a_to_Z_$%%^&*",
'escaped_quote': 'esc"aped',
#NB 'true_value': True,
'false_value': False,
'integer': 22,
'floating': 1.1,
'int_array': np.array([1, 2, 3]),
'float_array': np.array([3.3, 4.4]),
'virial': np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
#NB 'not_a_3x3_array': np.array([1, 4, 7, 2, 5, 8, 3, 6, 9]),
'Lattice': np.array([[4.3, 0.0, 0.0],
[0.0, 3.3, 0.0],
[0.0, 0.0, 7.0]]),
'scientific_float': 1.2e7,
'scientific_float_2': 5e-6,
'scientific_float_array': np.array([1.2, 2200, 40, 0.33, 0.02]),
'not_array': "1.2 3.4 text",
'bool_array': np.array([True, False, True, False]),
'bool_array_2': np.array([True, False, True]),
'not_bool_array': 'T F S',
# '\xfcnicode_key': 'val\xfce',  # fails on AppVeyor
'unquoted_special_value': 'a_to_Z_$%%^&*',
'2body': 33.3,
#NB 'hyphen-ated': True,
'hyphen-ated': 'value',
'many_other_quotes': np.array([4, 8, 12]),
'comma_separated': np.array([7, 4, -1]),
'bool_array_commas': np.array([True, True, False, True]),
'Properties': 'species:S:1:pos:R:3',
'multiple_separators': 'val',
'double_equals': 'abc=xyz',
#NB 'trailing': True,
'with space': 'a value',
'space"': 'a value',
'f_str_looks_like_array': '[[1, 2, 3], [4, 5, 6]]',
#NB 'f_float_array': np.array([[1.5, 2, 3], [4, 5, 6]]),
#NB 'f_int_array': np.array([[1, 2], [3, 4]]),
#NB 'f_bool_bare': True,
'f_bool_value': False,
#NB 'f_dict': {"a": 1}
}
# parsed_dict = key_val_str_to_dict(complex_xyz_string)
# np.testing.assert_equal(parsed_dict, expected_dict)
# key_val_str = key_val_dict_to_str(expected_dict)
# parsed_dict = key_val_str_to_dict(key_val_str)
# np.testing.assert_equal(parsed_dict, expected_dict)
# Round trip through a file with complex line.
# Create file with the complex line and re-read it afterwards.
with open(tmp_path / 'complex.xyz', 'w', encoding='utf-8') as f_out:
f_out.write('1\n{}\nH 1.0 1.0 1.0'.format(complex_xyz_string))
for complex_atoms in helpers.read_all_variants(tmp_path / 'complex.xyz'):
# test all keys end up in info, as expected
for key, value in expected_dict.items():
if key in ['Properties', 'Lattice']:
continue # goes elsewhere
else:
np.testing.assert_equal(complex_atoms.info[key], value)
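# A minimal, self-contained round-trip sketch of the key=value handling exercised
# above.  It is deliberately not named test_* so pytest will not collect it, and it
# assumes the module-level imports (Atoms, read, write, np) used by the other tests.
def _sketch_info_roundtrip(tmp_path):
    atoms = Atoms('H2', positions=[[0.0, 0.0, 0.0], [0.0, 0.0, 0.74]])
    atoms.info['note'] = 'a quoted value'      # written as note="a quoted value"
    atoms.info['kpts'] = np.array([4, 4, 4])   # written as kpts="4 4 4"
    write(tmp_path / 'sketch.xyz', atoms)
    reread = read(tmp_path / 'sketch.xyz')
    assert reread.info['note'] == 'a quoted value'
    np.testing.assert_array_equal(reread.info['kpts'], [4, 4, 4])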
def test_write_multiple(at, images):
# write multiple atoms objects to one xyz
if os.path.exists('append.xyz'): os.unlink('append.xyz')
if os.path.exists('comp_append.xyz'): os.unlink('comp_append.xyz')
for atoms in images:
write('append.xyz', atoms, append=True)
# write('comp_append.xyz.gz', atoms, append=True)
write('not_append.xyz', atoms, append=False)
readFrames = read('append.xyz', index=slice(0, None))
assert readFrames == images
# readFrames = read('comp_append.xyz', index=slice(0, None))
# assert readFrames == images
singleFrame = read('not_append.xyz', index=slice(0, None))
assert singleFrame[-1] == images[-1]
# read xyz with blank comment line
def test_blank_comment(tmp_path, helpers):
(tmp_path / 'blankcomment.xyz').write_text("""4

Mg -4.25650 3.79180 -2.54123
C -1.15405 2.86652 -1.26699
C -5.53758 3.70936 0.63504
C -7.28250 4.71303 -3.82016
""")
for ai, a in enumerate(helpers.read_all_variants(tmp_path / 'blankcomment.xyz')):
assert a.info == { 'comment' : ''}
##def test_escape():
## assert escape('plain_string') == 'plain_string'
## assert escape('string_containing_"') == r'"string_containing_\""'
## assert escape('string with spaces') == '"string with spaces"'
# no writing and calculator reading yet
##@pytest.mark.filterwarnings('ignore:write_xyz')
def test_stress(tmp_path, helpers):
# build a water dimer, which has 6 atoms
water1 = molecule('H2O')
water2 = molecule('H2O')
water2.positions[:, 0] += 5.0
atoms = water1 + water2
atoms.cell = [10, 10, 10]
atoms.pbc = True
# array with clashing name
atoms.new_array('stress', np.arange(6, dtype=float))
atoms.calc = EMT()
a_stress = atoms.get_stress()
for fn in helpers.write_all_variants(tmp_path / 'tmp.xyz', atoms, write_calc=True):
for b in helpers.read_all_variants(fn, create_calc=True):
assert abs(b.get_stress() - a_stress).max() < 1e-6
assert abs(b.arrays['stress'] - | np.arange(6, dtype=float) | numpy.arange |
import matplotlib
matplotlib.use('Agg')
import numpy as np
import scipy.stats
import matplotlib.pylab as plt
import os
import sys
from .context import vfe
from .context import config
import pdb
np.random.seed(42)
# We first define several utility functions
def kink_true(x):
fx = np.zeros(x.shape)
for t in range(x.shape[0]):
xt = x[t]
if xt < 4:
fx[t] = xt + 1
else:
fx[t] = -4*xt + 21
return fx
def kink(T, process_noise, obs_noise, xprev=None):
if xprev is None:
xprev = np.random.randn()
y = np.zeros([T, ])
x = np.zeros([T, ])
xtrue = np.zeros([T, ])
for t in range(T):
if xprev < 4:
fx = xprev + 1
else:
fx = -4*xprev + 21
xtrue[t] = fx
x[t] = fx + np.sqrt(process_noise)*np.random.randn()
xprev = x[t]
y[t] = x[t] + np.sqrt(obs_noise)*np.random.randn()
return xtrue, x, y
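# Small usage sketch of the kink dynamics above (kept as an uncalled helper so the
# test module has no import-time side effects).  The noise levels are illustrative.
def _kink_usage_sketch():
    T = 50
    xtrue, x, y = kink(T, process_noise=0.2, obs_noise=0.1, xprev=0.0)
    assert xtrue.shape == (T,) and x.shape == (T,) and y.shape == (T,)
    return xtrue, x, y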
def plot_latent_kink(model, y, plot_title=''):
# make prediction on some test inputs
N_test = 200
x_test = np.linspace(-4, 6, N_test) / model.emi_layer.C[0, 0]
x_test = np.reshape(x_test, [N_test, 1])
zu = model.dyn_layer.zu
mu, vu = model.predict_f(zu)
mf, vf = model.predict_f(x_test)
my, vy = model.predict_y(x_test)
# plot function
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x_test[:,0], kink_true(x_test[:,0]), '-', color='k')
ax.plot(zu, mu, 'ob')
ax.plot(x_test[:,0], mf[:,0], '-', color='b')
ax.fill_between(
x_test[:,0],
mf[:,0] + 2*np.sqrt(vf[:,0]),
mf[:,0] - 2*np.sqrt(vf[:,0]),
alpha=0.2, edgecolor='b', facecolor='b')
ax.plot(model.emi_layer.C[0, 0]*x_test[:,0], my[:,0], '-', color='r')
ax.fill_between(
model.emi_layer.C[0, 0]*x_test[:,0],
my[:,0] + 2* | np.sqrt(vy[:,0]) | numpy.sqrt |
# coding: utf-8
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test Structured units and quantities specifically with the ERFA ufuncs.
"""
import pytest
import numpy as np
from numpy.testing import assert_array_equal
import erfa
from erfa import ufunc as erfa_ufunc
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.introspection import minversion
ERFA_LE_2_0_0 = not minversion(erfa, '2.0.0.1')
class TestPVUfuncs:
def setup_class(self):
self.pv_unit = u.Unit('AU,AU/day')
self.pv_value = np.array([([1., 0., 0.], [0., 0.0125, 0.]),
([0., 1., 0.], [-.0125, 0., 0.])],
dtype=erfa_ufunc.dt_pv)
self.pv = self.pv_value << self.pv_unit
def test_cpv(self):
pv_copy = erfa_ufunc.cpv(self.pv)
assert_array_equal(pv_copy, self.pv)
assert not np.may_share_memory(pv_copy, self.pv)
def test_p2pv(self):
p2pv = erfa_ufunc.p2pv(self.pv['p'])
assert_array_equal(p2pv['p'], self.pv['p'])
assert_array_equal(p2pv['v'], np.zeros(self.pv.shape+(3,), float) << u.m/u.s)
@pytest.mark.xfail(erfa.__version__ <= '2.0.0',
reason='erfa bug; https://github.com/liberfa/pyerfa/issues/70)')
def test_p2pv_inplace(self):
# TODO: fix np.zeros_like.
out = np.zeros_like(self.pv_value) << self.pv_unit
p2pv = erfa_ufunc.p2pv(self.pv['p'], out=out)
assert out is p2pv
assert_array_equal(p2pv['p'], self.pv['p'])
assert_array_equal(p2pv['v'], np.zeros(self.pv.shape+(3,), float) << u.m/u.s)
def test_pv2p(self):
p = erfa_ufunc.pv2p(self.pv)
assert_array_equal(p, self.pv['p'])
out = np.zeros_like(p)
p2 = erfa_ufunc.pv2p(self.pv, out=out)
assert out is p2
assert_array_equal(p2, self.pv['p'])
def test_pv2s(self):
theta, phi, r, td, pd, rd = erfa_ufunc.pv2s(self.pv)
assert theta.unit == u.radian
assert_quantity_allclose(theta, [0, 90] * u.deg) # longitude
assert phi.unit == u.radian
assert_array_equal(phi.value, np.zeros(self.pv.shape)) # latitude
assert r.unit == u.AU
assert_array_equal(r.value, np.ones(self.pv.shape))
assert td.unit == u.radian/u.day
assert_array_equal(td.value, np.array([0.0125]*2))
assert pd.unit == u.radian/u.day
assert_array_equal(pd.value, np.zeros(self.pv.shape))
assert rd.unit == u.AU/u.day
assert_array_equal(rd.value, np.zeros(self.pv.shape))
def test_pv2s_non_standard_units(self):
pv = self.pv_value << u.Unit('Pa,Pa/m')
theta, phi, r, td, pd, rd = erfa_ufunc.pv2s(pv)
assert theta.unit == u.radian
assert_quantity_allclose(theta, [0, 90] * u.deg) # longitude
assert phi.unit == u.radian
assert_array_equal(phi.value, np.zeros(pv.shape)) # latitude
assert r.unit == u.Pa
assert_array_equal(r.value, np.ones(pv.shape))
assert td.unit == u.radian/u.m
assert_array_equal(td.value, np.array([0.0125]*2))
assert pd.unit == u.radian/u.m
assert_array_equal(pd.value, np.zeros(pv.shape))
assert rd.unit == u.Pa/u.m
assert_array_equal(rd.value, np.zeros(pv.shape))
@pytest.mark.xfail(reason=(
'erfa ufuncs cannot take different names; it is not yet clear whether '
'this is changeable; see https://github.com/liberfa/pyerfa/issues/77'))
def test_pv2s_non_standard_names_and_units(self):
pv_value = np.array(self.pv_value, dtype=[('pos', 'f8'), ('vel', 'f8')])
pv = pv_value << u.Unit('Pa,Pa/m')
theta, phi, r, td, pd, rd = erfa_ufunc.pv2s(pv)
assert theta.unit == u.radian
assert_quantity_allclose(theta, [0, 90] * u.deg) # longitude
assert phi.unit == u.radian
assert_array_equal(phi.value, np.zeros(pv.shape)) # latitude
assert r.unit == u.Pa
assert_array_equal(r.value, np.ones(pv.shape))
assert td.unit == u.radian/u.m
assert_array_equal(td.value, np.array([0.0125]*2))
assert pd.unit == u.radian/u.m
assert_array_equal(pd.value, np.zeros(pv.shape))
assert rd.unit == u.Pa/u.m
assert_array_equal(rd.value, np.zeros(pv.shape))
def test_s2pv(self):
theta, phi, r, td, pd, rd = erfa_ufunc.pv2s(self.pv)
# On purpose change some of the units away from expected by s2pv.
pv = erfa_ufunc.s2pv(theta.to(u.deg), phi, r.to(u.m),
td.to(u.deg/u.day), pd, rd.to(u.m/u.s))
assert pv.unit == u.StructuredUnit('m, m/s', names=('p', 'v'))
assert_quantity_allclose(pv['p'], self.pv['p'], atol=1*u.m, rtol=0)
assert_quantity_allclose(pv['v'], self.pv['v'], atol=1*u.mm/u.s, rtol=0)
def test_pvstar(self):
ra, dec, pmr, pmd, px, rv, stat = erfa_ufunc.pvstar(self.pv)
assert_array_equal(stat, np.zeros(self.pv.shape, dtype='i4'))
assert ra.unit == u.radian
assert_quantity_allclose(ra, [0, 90] * u.deg)
assert dec.unit == u.radian
assert_array_equal(dec.value, np.zeros(self.pv.shape)) # latitude
assert pmr.unit == u.radian/u.year
assert_quantity_allclose(pmr, [0.0125, 0.0125]*u.radian/u.day)
assert pmd.unit == u.radian/u.year
assert_array_equal(pmd.value, np.zeros(self.pv.shape))
assert px.unit == u.arcsec
assert_quantity_allclose(px, 1*u.radian)
assert rv.unit == u.km / u.s
assert_array_equal(rv.value, np.zeros(self.pv.shape))
def test_starpv(self):
ra, dec, pmr, pmd, px, rv, stat = erfa_ufunc.pvstar(self.pv)
pv, stat = erfa_ufunc.starpv(ra.to(u.deg), dec.to(u.deg), pmr, pmd,
px, rv.to(u.m/u.s))
assert_array_equal(stat, np.zeros(self.pv.shape, dtype='i4'))
assert pv.unit == self.pv.unit
# Roundtrip is not as good as hoped on 32bit, not clear why.
# But proper motions are ridiculously high...
assert_quantity_allclose(pv['p'], self.pv['p'], atol=1*u.m, rtol=0)
assert_quantity_allclose(pv['v'], self.pv['v'], atol=1*u.m/u.s, rtol=0)
def test_pvtob(self):
pv = erfa_ufunc.pvtob([90, 0]*u.deg, 0.*u.deg, 100*u.km,
0*u.deg, 0*u.deg, 0*u.deg, 90*u.deg)
assert pv.unit == u.StructuredUnit('m, m/s', names=('p', 'v'))
assert pv.unit['v'] == u.m / u.s
assert_quantity_allclose(pv['p'], [[-6478, 0, 0], [0, 6478, 0]]*u.km,
atol=2*u.km)
assert_quantity_allclose(pv['v'], [[0, -0.5, 0], [-0.5, 0, 0]]*u.km/u.s,
atol=0.1*u.km/u.s)
def test_pvdpv(self):
pvdpv = erfa_ufunc.pvdpv(self.pv, self.pv)
assert pvdpv['pdp'].unit == self.pv.unit['p'] ** 2
assert pvdpv['pdv'].unit == self.pv.unit['p'] * self.pv.unit['v']
assert_array_equal(pvdpv['pdp'], np.einsum('...i,...i->...',
self.pv['p'], self.pv['p']))
assert_array_equal(pvdpv['pdv'], 2*np.einsum('...i,...i->...',
self.pv['p'], self.pv['v']))
z_axis = u.Quantity(
np.array(([0, 0, 1], [0, 0, 0]), erfa_ufunc.dt_pv),
'1,1/s')
pvdpv2 = erfa_ufunc.pvdpv(self.pv, z_axis)
assert pvdpv2['pdp'].unit == self.pv.unit['p']
assert pvdpv2['pdv'].unit == self.pv.unit['v']
assert_array_equal(pvdpv2['pdp'].value, np.zeros(self.pv.shape))
assert_array_equal(pvdpv2['pdv'].value, np.zeros(self.pv.shape))
def test_pvxpv(self):
pvxpv = erfa_ufunc.pvxpv(self.pv, self.pv)
assert pvxpv['p'].unit == self.pv.unit['p'] ** 2
assert pvxpv['v'].unit == self.pv.unit['p'] * self.pv.unit['v']
assert_array_equal(pvxpv['p'].value, np.zeros(self.pv['p'].shape))
assert_array_equal(pvxpv['v'].value, np.zeros(self.pv['v'].shape))
z_axis = u.Quantity(
| np.array(([0, 0, 1], [0, 0, 0]), erfa_ufunc.dt_pv) | numpy.array |
# -*- coding: UTF-8 -*-
"""
This script demonstrates the shape of the normal distribution under different parameter settings.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.mlab import bivariate_normal
def generateData(n, mean, cov):
"""
Randomly generate normally distributed data.
"""
np.random.seed(2033)
data = | np.random.multivariate_normal(mean, cov, size=n) | numpy.random.multivariate_normal |
# -*- coding: utf-8 -*-
"""
Sections:
- import libraries and define functions
- loading all the data in a specific main folder into mainDataList
- load data corresponding to a specific experiment (subfolder or video) into variables
- load variables from postprocessed file corresponding to the specific experiment above
- some simple plots just to look at the data for one specific experiment
- cluster analysis
- some plots to look at pairwise data and cluster information.
- drawing clusters and saving into movies
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import time
import cv2 as cv
import scipy.io
from scipy.io import loadmat
from sklearn.metrics import mutual_info_score
from scipy.spatial import distance as scipy_distance
from scipy.spatial import Voronoi as ScipyVoronoi
import progressbar
import os
import glob
import shelve
import scripts.functions_spinning_rafts as fsr
rootFolderNameFromWindows = r'D:\\VideoProcessingFolder' # r'E:\Data_Camera_Basler_acA800-510uc_coilSystem'
# rootFolderNameFromWindows = '/media/gardi/Seagate Backup Plus Drive/Data_Camera_Basler_acA800-510uc_coilSystem'
# rootFolderNameFromWindows = '/media/gardi/Elements/Data_Camera_Basler-acA2500-60uc'
# rootFolderNameFromWindows = '/media/gardi/Elements/Data_basler'
# rootFolderNameFromWindows = r'E:\Data_Camera_Basler-acA2500-60uc'
# rootFolderNameFromWindows = '/media/gardi/Elements/Data_Camera_Basler-acA2500-60uc/2018-10-09_o-D300-sym4-amp2-arcAngle30-Batch21Sep2018_Co500Au60_14mT_tiling_to be analyzed/processed'
# rootFolderNameFromWindows = '/media/gardi/Elements/Data_Camera_Basler-acA2500-60uc/2018-10-09_o-D300-sym4-amp2-arcAngle30-Batch21Sep2018_Co500Au60_14mT_tiling_to be analyzed/processed/processed'
# rootFolderNameFromWindows = '/media/gardi/Elements/Data_Camera_Basler-acA2500-60uc/2018-10-09_o-D300-sym4-amp2-arcAngle30-Batch21Sep2018_Co500Au60_14mT_tiling_to be analyzed'
# rootFolderNameFromWindows = '/media/gardi/Elements/Data_basler'
# rootFolderNameFromWindows = '/media/gardi/MPI-11/Data_basler'
# rootFolderNameFromWindows = '/media/gardi/Elements/Data_PhantomMiroLab140'
# rootFolderNameFromWindows = '/home/gardi/Rafts/Experiments Data/Data_PhantomMiroLab140'
# rootFolderNameFromWindows = '/media/gardi/MPI-Data9/Data_Basler-ace2500-60uc_coilsystem'
os.chdir(rootFolderNameFromWindows)
rootFolderTreeGen = os.walk(rootFolderNameFromWindows)
_, mainFolders, _ = next(rootFolderTreeGen)
# %% loading all the data in a specific main folder into mainDataList
# at the moment, it handles one main folder at a time.
# for mainFolderID in np.arange(0,1):
# os.chdir(mainFolders[mainFolderID])
mainFolderID = 0
os.chdir(mainFolders[mainFolderID])
dataFileList = glob.glob('*.dat')
dataFileList.sort()
dataFileListExcludingPostProcessed = dataFileList.copy()
numberOfPostprocessedFiles = 0
mainDataList = []
variableListsForAllMainData = []
for dataID in range(len(dataFileList)):
dataFileToLoad = dataFileList[dataID].partition('.dat')[0]
if 'postprocessed' in dataFileToLoad:
# the list length changes as items are deleted
del dataFileListExcludingPostProcessed[dataID - numberOfPostprocessedFiles]
numberOfPostprocessedFiles = numberOfPostprocessedFiles + 1
continue
tempShelf = shelve.open(dataFileToLoad)
variableListOfOneMainDataFile = list(tempShelf.keys())
expDict = {}
for key in tempShelf:
try:
expDict[key] = tempShelf[key]
except TypeError:
pass
tempShelf.close()
mainDataList.append(expDict)
variableListsForAllMainData.append(variableListOfOneMainDataFile)
# # go one level up to the root folder
# os.chdir('..')
# %% load data corresponding to a specific experiment (subfolder or video) into variables
dataID = 0
# explicitly load variables from data file
date = mainDataList[dataID]['date']
batchNum = mainDataList[dataID]['batchNum']
spinSpeed = mainDataList[dataID]['spinSpeed']
numOfRafts = mainDataList[dataID]['numOfRafts']
numOfFrames = mainDataList[dataID]['numOfFrames']
raftRadii = mainDataList[dataID]['raftRadii']
raftLocations = mainDataList[dataID]['raftLocations']
raftOrbitingCenters = mainDataList[dataID]['raftOrbitingCenters']
raftOrbitingDistances = mainDataList[dataID]['raftOrbitingDistances']
raftOrbitingAngles = mainDataList[dataID]['raftOrbitingAngles']
raftOrbitingLayerIndices = mainDataList[dataID]['raftOrbitingLayerIndices']
magnification = mainDataList[dataID]['magnification']
commentsSub = mainDataList[dataID]['commentsSub']
currentFrameGray = mainDataList[dataID]['currentFrameGray']
raftEffused = mainDataList[dataID]['raftEffused']
subfolderName = mainDataList[dataID]['subfolders'][mainDataList[dataID]['expID']]
variableListFromProcessedFile = list(mainDataList[dataID].keys())
# load the rest of the variables if necessary
for key, value in mainDataList[dataID].items(): # loop through key-value pairs of python dictionary
if not (key in globals()):
globals()[key] = value
outputDataFileName = date + '_' + str(numOfRafts) + 'Rafts_' + str(batchNum) + '_' + str(spinSpeed) + 'rps_' + str(
magnification) + 'x_' + commentsSub
# %% load all variables from postprocessed file corresponding to the specific experiment above
analysisType = 5 # 1: cluster, 2: cluster+Voronoi, 3: MI, 4: cluster+Voronoi+MI, 5: velocity/MSD + cluster + Voronoi
shelveDataFileName = date + '_' + str(numOfRafts) + 'Rafts_' + str(batchNum) + '_' + str(spinSpeed) + 'rps_' + str(
magnification) + 'x_' + 'postprocessed' + str(analysisType)
shelveDataFileExist = glob.glob(shelveDataFileName + '.dat')
if shelveDataFileExist:
print(shelveDataFileName + ' exists, load additional variables. ')
tempShelf = shelve.open(shelveDataFileName)
variableListFromPostProcessedFile = list(tempShelf.keys())
for key in tempShelf: # just loop through all the keys in the dictionary
globals()[key] = tempShelf[key]
tempShelf.close()
print('loading complete.')
elif len(shelveDataFileExist) == 0:
print(shelveDataFileName + ' does not exist')
# %% some simple plots just to look at the data for one specific experiment
# plot the center of mass
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(15, 15))
ax.plot(raftOrbitingCenters[:, 0], currentFrameGray.shape[1] - raftOrbitingCenters[:, 1])
fig.show()
# plot the center of mass, x and y coordinate separately
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(15, 15))
ax.plot(raftOrbitingCenters[:, 0], label='x')
ax.plot(raftOrbitingCenters[:, 1], label='y')
ax.legend()
fig.show()
# plot orbiting distances vs frame number
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(15, 15))
colors = plt.cm.viridis(np.linspace(0, 1, numOfRafts))
for i in range(0, numOfRafts):
ax.plot(np.arange(numOfFrames), raftOrbitingDistances[i, :], c=colors[i], label='{}'.format(i))
ax.set_xlim([0, numOfFrames])
ax.set_ylim([0, raftOrbitingDistances.max()])
ax.set_xlabel('Time (frame)', size=20)
ax.set_ylabel('distance to center of mass', size=20)
ax.set_title('distance to center of mass, {} Rafts'.format(numOfRafts), size=20)
ax.tick_params(axis='both', labelsize=18, width=2, length=10)
ax.legend()
fig.show()
# dfRaftOrbitingDistances = pd.DataFrame(np.transpose(raftOrbitingDistances))
# dfRaftOrbitingDistances.to_csv(outputDataFileName + '_distances.csv')
# plot orbiting Angles vs frame number
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(15, 15))
colors = plt.cm.viridis(np.linspace(0, 1, numOfRafts))
for i in range(0, numOfRafts):
ax.plot(np.arange(numOfFrames), raftOrbitingAngles[i, :], '-', c=colors[i], label='{}'.format(i))
ax.set_xlim([0, numOfFrames])
ax.set_ylim([raftOrbitingAngles.min(), raftOrbitingAngles.max()])
ax.set_xlabel('Frames(Time)', size=20)
ax.set_ylabel('raft orbiting angles', size=20)
ax.set_title('Raft orbiting angles, {} Rafts'.format(numOfRafts), size=20)
ax.tick_params(axis='both', labelsize=18, width=2, length=10)
ax.legend()
fig.show()
# dfRaftOrbitingAngles= pd.DataFrame(np.transpose(raftOrbitingAngles))
# dfRaftOrbitingAngles.to_csv(outputDataFileName + '_angles.csv')
# plt.close('all')
# %% cluster analysis
radius = raftRadii.mean() # pixel check raftRadii.mean()
scaleBar = 300 / radius / 2 # micron per pixel
raftPairwiseDistances = np.zeros((numOfRafts, numOfRafts, numOfFrames))
raftPairwiseEdgeEdgeDistancesSmallest = np.zeros((numOfRafts, numOfFrames))
raftPairwiseDistancesInRadius = np.zeros((numOfRafts, numOfRafts, numOfFrames))
raftPairwiseConnectivity = np.zeros((numOfRafts, numOfRafts, numOfFrames))
# using scipy distance module
t1 = time.perf_counter()
for frameNum in np.arange(numOfFrames):
raftPairwiseDistances[:, :, frameNum] = scipy_distance.cdist(raftLocations[:, frameNum, :],
raftLocations[:, frameNum, :], 'euclidean')
# the smallest nonzero edge-edge distance is assigned to each raft as its pairwise distance,
# connected rafts will be set to 0 later
raftPairwiseEdgeEdgeDistancesSmallest[:, frameNum] = np.partition(raftPairwiseDistances[:, :, frameNum], 1, axis=1)[
:, 1] - radius * 2
t2 = time.perf_counter()
timeTotal = t2 - t1 # in seconds
print(timeTotal)
raftPairwiseDistancesInRadius = raftPairwiseDistances / radius
# plot the histogram of pairwise distance in radius to look at the selection
# of radius value for thresholding connectivity
frameNumToLookAt = 0
raftPairwiseDistancesInRadius_oneFrame = raftPairwiseDistancesInRadius[:, :, frameNumToLookAt]
binsForPairwiseDistances = np.arange(0, 5, 0.1)
count, edges = np.histogram(raftPairwiseDistancesInRadius_oneFrame, bins=binsForPairwiseDistances)
fig, ax = plt.subplots(1, 1, figsize=(20, 10))
ax.bar(edges[:-1], count, align='edge', width=0.05)
ax.set_xlabel('pairwise distances', {'size': 15})
ax.set_ylabel('count', {'size': 15})
ax.set_title('histogram of pairwise distances of frame {}'.format(frameNumToLookAt), {'size': 15})
ax.legend(['pairwise distances'])
fig.show()
# re-adjust connectivity thresholding if necessary
# Caution: this way of determining clusters produces errors, mostly false positives.
connectivityThreshold = 2.3 # unit: radius
# re-thresholding the connectivity matrix.
# Note that the diagonal self-distance is zero, and needs to be taken care of separately
raftPairwiseConnectivity = np.logical_and((raftPairwiseDistancesInRadius < connectivityThreshold),
(raftPairwiseDistancesInRadius > 0)) * 1
# To correct false positives: if two rafts are not connected in the next frame,
# they are not considered connected in the present frame either
for currentFrameNum in range(numOfFrames - 1):
raftAs, raftBs = np.nonzero(raftPairwiseConnectivity[:, :, currentFrameNum])
for raftA, raftB in zip(raftAs, raftBs):
if raftPairwiseConnectivity[raftA, raftB, currentFrameNum + 1] == 0:
raftPairwiseConnectivity[raftA, raftB, currentFrameNum] = 0
# information about clusters in all frames. For each frame, the array has two columns,
# 1st col: cluster number, 2nd col: cluster size (excluding loners)
clusters = np.zeros((numOfRafts, 2, numOfFrames))
# clusterSizeCounts stores the number of clusters of each size for all frames.
# the first index is used as the size of the cluster
clusterSizeCounts = np.zeros((numOfRafts + 1, numOfFrames))
# fill in clusters matrix
t1 = time.perf_counter()
for frameNum in np.arange(numOfFrames):
clusterNum = 1
raftAs, raftBs = np.nonzero(raftPairwiseConnectivity[:, :, frameNum])
# determine the cluster number and store the cluster number in the first column
for raftA, raftB in zip(raftAs, raftBs):
# to see if A and B are already registered in the raftsInClusters
raftsInClusters = np.nonzero(clusters[:, 0, frameNum])
A = any(raftA in raft for raft in raftsInClusters)
B = any(raftB in raft for raft in raftsInClusters)
# if both are new, then it is a new cluster
if (A == False) and (B == False):
clusters[raftA, 0, frameNum] = clusterNum
clusters[raftB, 0, frameNum] = clusterNum
clusterNum += 1
# if one of them is new, then it is an old cluster
if (A == True) and (B == False):
clusters[raftB, 0, frameNum] = clusters[raftA, 0, frameNum]
if (A == False) and (B == True):
clusters[raftA, 0, frameNum] = clusters[raftB, 0, frameNum]
# if neither is new and if their cluster numbers differ,
# then change the larger cluster number to the smaller one
# note that this could lead to a cluster number being jumped over
if (A == True) and (B == True) and (clusters[raftA, 0, frameNum] != clusters[raftB, 0, frameNum]):
clusterNumLarge = max(clusters[raftA, 0, frameNum], clusters[raftB, 0, frameNum])
clusterNumSmall = min(clusters[raftA, 0, frameNum], clusters[raftB, 0, frameNum])
clusters[clusters[:, 0, frameNum] == clusterNumLarge, 0, frameNum] = clusterNumSmall
# Count the number of rafts in each cluster and store the cluster size in the second column
numOfClusters = clusters[:, 0, frameNum].max()
if numOfClusters > 0:
for clusterNum in np.arange(1, numOfClusters + 1):
clusterSize = len(clusters[clusters[:, 0, frameNum] == clusterNum, 0, frameNum])
clusters[clusters[:, 0, frameNum] == clusterNum, 1, frameNum] = clusterSize
raftPairwiseEdgeEdgeDistancesSmallest[np.nonzero(clusters[:, 0, frameNum]), frameNum] = 0
t2 = time.perf_counter()
timeTotal = t2 - t1 # in seconds
print(timeTotal)
# fill in clusterSizeCounts matrix
t1 = time.perf_counter()
for frameNum in np.arange(numOfFrames):
largestClusterSize = clusters[:, 1, frameNum].max()
# count loners
numOfLoners = len(clusters[clusters[:, 1, frameNum] == 0, 1, frameNum])
clusterSizeCounts[1, frameNum] = numOfLoners
# for the rest, the number of occurrences of a cluster size in the 2nd column equals the cluster size
# times the number of clusters of that size
for clusterSize in np.arange(2, largestClusterSize + 1):
numOfClusters = len(clusters[clusters[:, 1, frameNum] == clusterSize, 1, frameNum]) / clusterSize
clusterSizeCounts[int(clusterSize), frameNum] = numOfClusters
t2 = time.perf_counter()
timeTotal = t2 - t1 # in seconds
print(timeTotal)
# some averaging
dummyArray = np.arange((numOfRafts + 1) * numOfFrames).reshape((numOfFrames, -1)).T
dummyArray = np.mod(dummyArray, (numOfRafts + 1)) # rows are cluster sizes, and columns are frame numbers
clusterSizeAvgIncludingLoners = np.average(dummyArray, axis=0, weights=clusterSizeCounts)
clusterSizeAvgIncludingLonersAllFrames = clusterSizeAvgIncludingLoners.mean()
print('clusterSizeAvgIncludingLonersAllFrames = {:.4}'.format(clusterSizeAvgIncludingLonersAllFrames))
clusterSizeCountsExcludingLoners = clusterSizeCounts.copy()
clusterSizeCountsExcludingLoners[1, :] = 0
clusterSizeAvgExcludingLoners, sumOfWeights = np.ma.average(dummyArray, axis=0,
weights=clusterSizeCountsExcludingLoners, returned=True)
clusterSizeAvgExcludingLonersAllFrames = clusterSizeAvgExcludingLoners.mean()
print('clusterSizeAvgExcludingLonersAllFrames = {:.4} '.format(clusterSizeAvgExcludingLonersAllFrames))
raftPairwiseEdgeEdgeDistancesSmallestMean = raftPairwiseEdgeEdgeDistancesSmallest.mean() * scaleBar
raftPairwiseEdgeEdgeDistancesSmallestStd = raftPairwiseEdgeEdgeDistancesSmallest.std() * scaleBar
numOfLonersAvgAllFrames = clusterSizeCounts[1, :].mean()
print('raftPairwiseEdgeEdgeDistancesSmallestMean = {:.3} micron'.format(raftPairwiseEdgeEdgeDistancesSmallestMean))
print('raftPairwiseEdgeEdgeDistancesSmallestStd = {:.3} micron'.format(raftPairwiseEdgeEdgeDistancesSmallestStd))
print('average number of loners = {:.3}'.format(numOfLonersAvgAllFrames))
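# Cross-check sketch for the hand-rolled cluster labelling above: on a small toy
# connectivity matrix, scipy's connected_components gives the same grouping (up to
# a relabelling).  This block is illustrative only and does not touch the raft
# variables computed above.
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import connected_components
_demo_adjacency = np.array([[0, 1, 0, 0],
                            [1, 0, 0, 0],
                            [0, 0, 0, 0],
                            [0, 0, 0, 0]])  # rafts 0 and 1 form a pair; 2 and 3 are loners
_demo_n_comp, _demo_labels = connected_components(csr_matrix(_demo_adjacency), directed=False)
print('toy connectivity check: {} components, labels {}'.format(_demo_n_comp, _demo_labels))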
# %% some plots to look at pairwise data and cluster information.
# plot pairwise distance to a specific raft vs frame number
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(15, 15))
colors = plt.cm.jet(np.linspace(0, 1, numOfRafts))
raft1Num = 0
for raft2Num in range(0, numOfRafts):
ax.plot(np.arange(numOfFrames), raftPairwiseDistancesInRadius[raft1Num, raft2Num, :], c=colors[raft2Num],
label='{}'.format(raft2Num))
ax.legend(loc='best')
ax.set_xlim([0, numOfFrames])
ax.set_ylim([0, raftPairwiseDistancesInRadius[raft1Num, :, :].max()])
ax.set_xlabel('Frames(Time)', size=20)
ax.set_ylabel('distance to raft {}'.format(raft1Num), size=20)
ax.set_title('distance to raft {}, {} Rafts'.format(raft1Num, numOfRafts), size=20)
ax.tick_params(axis='both', labelsize=18, width=2, length=10)
fig.show()
# plot the size of the cluster one specific raft belongs to vs frame number
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(15, 15))
raftNum = 2
ax.plot(np.arange(numOfFrames), clusters[raftNum, 1, :])
ax.legend(loc='best')
ax.set_xlim([0, numOfFrames])
ax.set_ylim([0, clusters[raftNum, 1, :].max()])
ax.set_xlabel('Frames(Time)', size=20)
ax.set_ylabel('cluster size', size=20)
ax.set_title('the size of the cluster that include raft {}'.format(raftNum), size=20)
ax.tick_params(axis='both', labelsize=18, width=2, length=10)
fig.show()
# plot the number of clusters vs frame number
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(15, 15))
colors = plt.cm.jet(np.linspace(0, 1, numOfRafts))
ax.plot(np.arange(numOfFrames), np.count_nonzero(clusterSizeCounts, axis=0), label='num of clusters')
ax.legend(loc='best')
ax.set_xlim([0, numOfFrames])
ax.set_ylim([0, clusters[:, 0, :].max() + 0.5])
ax.set_xlabel('Frames(Time)', size=20)
ax.set_ylabel('cluster number', size=20)
ax.set_title('cluster number', size=20)
ax.tick_params(axis='both', labelsize=18, width=2, length=10)
fig.show()
# plot the number of clusters with 2, 3, 4, ... rafts vs frame number
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(15, 15))
rows, _ = np.nonzero(clusterSizeCounts)
maxRaftsInACluster = rows.max()
colors = plt.cm.jet(np.linspace(0, 1, maxRaftsInACluster + 1))
for numOfRaftInACluster in range(1, maxRaftsInACluster + 1):
ax.plot(np.arange(numOfFrames), clusterSizeCounts[numOfRaftInACluster, :], c=colors[numOfRaftInACluster],
label='{}'.format(numOfRaftInACluster))
ax.legend(loc='best')
ax.set_xlim([0, numOfFrames])
ax.set_ylim([0, clusterSizeCounts.max() + 0.5])
ax.set_xlabel('Time(Frames)', size=20)
ax.set_ylabel('cluster count', size=20)
ax.set_title('the counts of clusters of various sizes over time', size=20)
ax.tick_params(axis='both', labelsize=18, width=2, length=10)
fig.show()
# plot average cluster sizes vs frame number
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(15, 15))
colors = plt.cm.jet(np.linspace(0, 1, numOfRafts))
ax.plot( | np.arange(numOfFrames) | numpy.arange |
import numpy as np
from algos import learn, policy
from env import LoopEnv
from utils import sample_demos, prob_dists
import argparse
import copy
import matplotlib.pyplot as plt
import os
from tqdm import tqdm
from mdp.borlangeworld import BorlangeWorld
import multiprocessing as mp
def get_args():
parser = argparse.ArgumentParser(description='Bayesian Inverse Reinforcement Learning')
parser.add_argument('--policy', '-p', choices=('eps', 'bol'))
parser.add_argument('--alpha', '-a', default=1, type=float, help='1/temperature of boltzmann distribution, '
'larger value makes policy close to the greedy')
parser.add_argument('--env_id', default=55, type=int)
parser.add_argument('--r_max', default=10, type=float)
parser.add_argument('--gamma', default=0.9, type=float)
parser.add_argument('--n_iter', default=500, type=int)
parser.add_argument('--burn_in', default=50, type=int)
parser.add_argument('--dist', default='multiuniformborlange', type=str,
choices=['uniform', 'gaussian', 'beta', 'gamma', 'multigauss','multigaussBorlange','multiuniformborlange'])
return parser.parse_args()
def bayesian_irl(env, demos, step_size, n_iter, r_max, prior, alpha, gamma, burn_in, sample_freq,ptrial):
assert burn_in <= n_iter
sampled_rewards = np.array(list(policy_walk(**locals()))[burn_in::sample_freq])
return sampled_rewards
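# Note: policy_walk(**locals()) above forwards every local of bayesian_irl (env, demos,
# step_size, n_iter, r_max, prior, alpha, gamma, ptrial, ...) as keyword arguments;
# names that policy_walk does not declare explicitly (burn_in, sample_freq) are
# absorbed by its **kwargs.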
def policy_walk(env, demos, step_size, n_iter, r_max, prior, alpha, gamma, ptrial, **kwargs):
assert r_max > 0, 'r_max must be positive'
# step 1
weights = sample_random_rewards(env.n_states, step_size, r_max, ptrial)
env.set_reward(weights)
# step 2
# pi = learn.policy_iteration(env, gamma)
pi, q = env.get_policy()
# step 3
for _ in tqdm(range(n_iter)):
env_tilda = copy.deepcopy(env)
tilda_weights = mcmc_reward_step(env.weights, step_size, r_max)
env_tilda.set_reward(tilda_weights)
pi_tilda, q_pi_r_tilda = env_tilda.get_policy()
# q_pi_r_tilda = learn.compute_q_for_pi(env, pi, gamma)
if is_not_optimal(q_pi_r_tilda, pi):
# pi_tilda = learn.policy_iteration(env_tilda, gamma, pi)
if np.random.random() < compute_ratio(demos, env_tilda, pi_tilda, env, pi, prior, alpha, gamma):
env, pi = env_tilda, pi_tilda
else:
if np.random.random() < compute_ratio(demos, env_tilda, pi, env, pi, prior, alpha, gamma):
env = env_tilda
yield env.weights
def is_not_optimal(q_values, pi):
# pi is suboptimal if, in some state, the Q-value of the action chosen by pi
# is below the best available Q-value in that state
return np.any(
q_values[np.arange(q_values.shape[0]).tolist(), np.argmax(pi, axis=1).tolist()] < np.max(q_values, axis=1))
def compute_ratio(demos, env_tilda, pi_tilda, env, pi, prior, alpha, gamma):
ln_p_tilda = compute_posterior(demos, env_tilda, pi_tilda, prior, alpha, gamma)
ln_p = compute_posterior(demos, env, pi, prior, alpha, gamma)
ratio = np.exp(ln_p_tilda - ln_p)
return ratio
def compute_posterior(demos, env, pi, prior, alpha, gamma):
ln_p = np.sum([np.log(pi[s, a]) for s, a in demos]) + np.log(prior(env.weights))
return ln_p
def mcmc_reward_step(weights, step_size, r_max):
noweight = True
while (noweight):
new_weights = np.random.uniform((-2.5, -2.5), (0., 0.), size=(1, 2)).squeeze()
noweight = (np.linalg.norm(new_weights[0] - weights[0]) > step_size[0]) or (
np.linalg.norm(new_weights[1] - weights[1]) > step_size[1])
return new_weights
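# Illustrative check (never called): the proposal above is a rejection sampler over
# the box [-2.5, 0]^2, so an accepted move always stays within the per-dimension
# step_size of the current weights.  The numbers below are arbitrary.
def _proposal_sketch():
    w = np.array([-1.0, -1.0])
    step = [0.05, 0.05]
    w_new = mcmc_reward_step(w, step, r_max=10)
    assert abs(w_new[0] - w[0]) <= step[0] and abs(w_new[1] - w[1]) <= step[1]
    return w_new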
def sample_random_rewards(n_states, step_size, r_max, ptrial):
"""
Sample initial reward weights. Despite the name, this variant loads the first two
components of precomputed starting points from Data/vborlange/myinitpoints.npy
(indexed by ptrial) rather than sampling from the grid R^{n_states}/step_size.
:param n_states: unused in this variant
:param step_size: unused in this variant
:param r_max: unused in this variant
:return: sampled rewards
"""
allrew = np.load("Data/vborlange/myinitpoints.npy")
rewards = allrew[ptrial%allrew.shape[0],0:2]
return rewards
def prepare_prior(dist, r_max):
prior = getattr(prob_dists, dist[0].upper() + dist[1:] + 'Dist')
print(prior)
if dist == 'uniform':
return prior(xmax=r_max)
elif dist == 'gaussian':
return prior()
elif dist in {'beta', 'gamma'}:
return prior(loc=-r_max, scale=1 / (2 * r_max))
elif dist == 'multigauss':
return prior(dist)
elif dist == "multigaussBorlange":
return prior(dist)
elif dist == "multiuniformborlange":
return prior()
else:
raise NotImplementedError('{} is not implemented.'.format(dist))
def main(args,t):
np.random.seed(5)
env = BorlangeWorld(destination=7622, horizon=100,discount=0.99, loadres=True)
demos = np.load(os.path.join("Data/vborlange/full_opt_trajectories.npy"))
demos = demos.reshape((-1, 2))
# run birl
# prior = prepare_prior(args.dist, args.r_max)
prior = prepare_prior(args.dist, args.r_max)
sampled_rewards = bayesian_irl(env, demos, step_size=[0.05, 0.05, 0.05], n_iter=args.n_iter, r_max=args.r_max,
prior=prior,
alpha=args.alpha, gamma=env.discount, burn_in=args.burn_in, sample_freq=1, ptrial=t)
saveprocdir = "Results/Borlange"
os.makedirs(saveprocdir, exist_ok=True)
np.save(os.path.join(saveprocdir, "rewards%d.npy") % t, sampled_rewards)
return sampled_rewards
def runmain(t,output):
args = get_args()
myweights = main(args,t)
output.put((t,myweights))
output = mp.Queue()
# Setup a list of processes that we want to run
processes = [mp.Process(target=runmain, args=(w,output)) for w in np.arange(10)]
# Run processes
for p in processes:
p.start()
# Exit the completed processes
for p in processes:
p.join()
# Get process results from the output queue
results = [output.get() for p in processes]
| np.save("Borlange_parallel.npy",results) | numpy.save |
import numpy as np
from pyquaternion import Quaternion
def so3_vee(Phi):
if Phi.ndim < 3:
Phi = np.expand_dims(Phi, axis=0)
if Phi.shape[1:3] != (3, 3):
raise ValueError("Phi must have shape ({},{}) or (N,{},{})".format(3, 3, 3, 3))
phi = np.empty([Phi.shape[0], 3])
phi[:, 0] = Phi[:, 2, 1]
phi[:, 1] = Phi[:, 0, 2]
phi[:, 2] = Phi[:, 1, 0]
return np.squeeze(phi)
def so3_wedge(phi):
phi = np.atleast_2d(phi)
if phi.shape[1] != 3:
raise ValueError(
"phi must have shape ({},) or (N,{})".format(3, 3))
Phi = np.zeros([phi.shape[0], 3, 3])
Phi[:, 0, 1] = -phi[:, 2]
Phi[:, 1, 0] = phi[:, 2]
Phi[:, 0, 2] = phi[:, 1]
Phi[:, 2, 0] = -phi[:, 1]
Phi[:, 1, 2] = -phi[:, 0]
Phi[:, 2, 1] = phi[:, 0]
return np.squeeze(Phi)
def so3_log(matrix):
cos_angle = 0.5 * np.trace(matrix) - 0.5
cos_angle = np.clip(cos_angle, -1., 1.)
angle = np.arccos(cos_angle)
if np.isclose(angle, 0.):
return so3_vee(matrix - np.identity(3))
else:
return so3_vee((0.5 * angle / np.sin(angle)) * (matrix - matrix.T))
def so3_left_jacobian(phi):
angle = np.linalg.norm(phi)
if np.isclose(angle, 0.):
return np.identity(3) + 0.5 * so3_wedge(phi)
axis = phi / angle
s = np.sin(angle)
c = np.cos(angle)
return (s / angle) * np.identity(3) + \
(1 - s / angle) * np.outer(axis, axis) + \
((1 - c) / angle) * so3_wedge(axis)
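# Quick consistency sketch (never called on import): vee inverts wedge, wedge(phi)
# is skew-symmetric, and the left Jacobian reduces to the identity as phi -> 0.
def _so3_sanity_sketch():
    phi = np.array([0.1, -0.2, 0.3])
    Phi = so3_wedge(phi)
    assert np.allclose(so3_vee(Phi), phi)
    assert np.allclose(Phi, -Phi.T)
    assert np.allclose(so3_left_jacobian(np.zeros(3)), np.identity(3))
    return so3_left_jacobian(phi)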
def se3_curlywedge(xi):
xi = np.atleast_2d(xi)
Psi = np.zeros([xi.shape[0], 6, 6])
Psi[:, 0:3, 0:3] = so3_wedge(xi[:, 3:6])
Psi[:, 0:3, 3:6] = so3_wedge(xi[:, 0:3])
Psi[:, 3:6, 3:6] = Psi[:, 0:3, 0:3]
return np.squeeze(Psi)
def se3_left_jacobian_Q_matrix(xi):
rho = xi[0:3] # translation part
phi = xi[3:6] # rotation part
rx = so3_wedge(rho)
px = so3_wedge(phi)
ph = np.linalg.norm(phi)
ph2 = ph * ph
ph3 = ph2 * ph
ph4 = ph3 * ph
ph5 = ph4 * ph
cph = np.cos(ph)
sph = np.sin(ph)
m1 = 0.5
m2 = (ph - sph) / ph3
m3 = (0.5 * ph2 + cph - 1.) / ph4
m4 = (ph - 1.5 * sph + 0.5 * ph * cph) / ph5
t1 = rx
t2 = px.dot(rx) + rx.dot(px) + px.dot(rx).dot(px)
t3 = px.dot(px).dot(rx) + rx.dot(px).dot(px) - 3. * px.dot(rx).dot(px)
t4 = px.dot(rx).dot(px).dot(px) + px.dot(px).dot(rx).dot(px)
return m1 * t1 + m2 * t2 + m3 * t3 + m4 * t4
def se3_left_jacobian(xi):
rho = xi[0:3] # translation part
phi = xi[3:6] # rotation part
# Near |phi|==0, use first order Taylor expansion
if np.isclose(np.linalg.norm(phi), 0.):
return np.identity(6) + 0.5 * se3_curlywedge(xi)
so3_jac = so3_left_jacobian(phi)
Q_mat = se3_left_jacobian_Q_matrix(xi)
jac = np.zeros([6, 6])
jac[0:3, 0:3] = so3_jac
jac[0:3, 3:6] = Q_mat
jac[3:6, 3:6] = so3_jac
return jac
def se3_inv_left_jacobian(xi):
rho = xi[0:3] # translation part
phi = xi[3:6] # rotation part
# Near |phi|==0, use first order Taylor expansion
if np.isclose(np.linalg.norm(phi), 0.):
return np.identity(6) - 0.5 * se3_curlywedge(xi)
so3_inv_jac = so3_inv_left_jacobian(phi)
Q_mat = se3_left_jacobian_Q_matrix(xi)
jac = np.zeros([6, 6])
jac[0:3, 0:3] = so3_inv_jac
jac[0:3, 3:6] = -so3_inv_jac.dot(Q_mat).dot(so3_inv_jac)
jac[3:6, 3:6] = so3_inv_jac
return jac
def so3_inv_left_jacobian(phi):
angle = | np.linalg.norm(phi) | numpy.linalg.norm |
import numpy as np
import numpy.matlib as npmat
import math
def my_phantomgallery( phantom_type ):
"""
Calculates the matrix of the elements of the phantom given its type.
Parameters
----------
phantom_type: 'ellipses' (or 'shepp_logan'),'modified_shepp_logan','squares','rectangles'
Returns
-------
M : matrix of the elements of the phantom
"""
if phantom_type == 'ellipses' or phantom_type == 'shepp_logan':
# [semiaxis 1, semiaxis 2, x center, y center, phi=angle (degrees), greyscale=attenuation]
M = np.array([[ .69, .92, 0, 0, 0, 1.],
[ .6624, .8740, 0, -.0184, 0, -0.8],
[ .1100, .3100, .22, 0, -18, -.2],
[ .1600, .4100, -.22, 0, 18, -.2],
[ .2100, .2500, 0, .35, 0, .1],
[ .0460, .0460, 0, .1, 0, .1],
[ .0460, .0460, 0, -.1, 0, .1],
[ .0460, .0230, -.08, -.605, 0, .1],
[ .0230, .0230, 0, -.605, 0, .1],
[ .0230, .0460, .06, -.605, 0, .1]])
elif phantom_type == 'modified_shepp_logan':
# [semiaxis 1, semiaxis 2, x center, y center, phi=angle (degrees), greyscale=attenuation]
p1 = [.7, .8, 0, 0, 0, 1]
p2 = [.65,.75,0,0,0,-.9]
p3 = [.15,.2,0,.4,0,.5]
p4 = [.25,.15,-.25,.25,135.79,.2]
p5 = [.25,.15,.25,.25,45.26,.2]
p6 = [.08,.25,0,-.3,28.65,.65]
p7 = [.05,.05,.5,-.3,0,.8]
# combine into a matrix with one ellipse in each row
M = np.array([p1, p2, p3, p4, p5, p6, p7]);
elif phantom_type == 'squares':
# [x center, y center, edge length ,phi=angle (degrees), greyscale=attenuation]
s1 = [0,0,1.3,0,1]
s2 = [0,0,1.1,0,-.9]
s3 = [.1,-.1,.5,180/6,.4]
s4 = [-.25,.15,.25,180/4,.2]
s5 = [-.2,.25,.3,180/3,.4]
#combine into a matrix with one square in each row
M = np.array([s1, s2, s3, s4, s5]);
elif (phantom_type == 'rectangles'):
# [x center, y center, dimension 1, dimension 2, phi=angle (degrees), greyscale=attenuation]
r1 = [0,0,1.3,1.1,0,1]
r2 = [0,0,1.2,1,0,-.9]
r3 = [0.25,.15,.25,.6,180/6,.4]
r4 = [-.2,.1,.25,.20,180/4,.2]
r5 = [-.3,.2,.3,.2,180/6,.4]
#combine into a matrix with one square in each row
M = np.array([r1, r2, r3, r4, r5])
else:
print('Unknown phantom_type')
M = None
return M
def phantom_ellipses(n_points,E):
"""
Function that create the phantom image of 'ellipses' type, from the matrix of the elements and given the number of pixels.
Parameters
----------
n_points: number of pixels on each row and column
E: matrix of the elements of the phantom
Returns
-------
phantom : phantom image
"""
#Rescaling according to image size
E[:,0] = E[:,0]*n_points/2 #semiaxis a
E[:,1] = E[:,1]*n_points/2 #semiaxis b
E[:,2] = E[:,2]*n_points/2 #x
E[:,3] = E[:,3]*n_points/2 #y
E[:,4] = E[:,4]*math.pi/180
x,y = np.meshgrid(np.arange(0,n_points)-n_points//2 ,np.arange(0,n_points)-n_points//2 )
nrow,ncol = E.shape
phantom1 = np.zeros((y.shape[0], y.shape[1], nrow))
for k in range(nrow): # iterate over the ellipses
x_new = x - E[k,2]
y_new = y - E[k,3]
#find(( (x.*cosp + y.*sinp).^2)./asq + ((y.*cosp - x.*sinp).^2)./bsq <= 1);
cosp = math.cos(E[k,4])
sinp = math.sin(E[k,4])
cond = np.square( x_new * cosp + y_new * sinp )*1/(E[k,0]*E[k,0]) + \
np.square(y_new * cosp - x_new * sinp)*1/(E[k,1]*E[k,1]) - 1
for i in range(x.shape[0]):
for j in range(x.shape[1]):
if (cond[i,j] <= 0.0):
phantom1[i,j,k] = E[k,5]; # gray scale
else:
phantom1[i,j,k] = 0.0;
#endif
#endfor
#endfor
#endfor
phantom1 = phantom1.sum(axis=2)
phantom = np.flipud(phantom1)
return phantom
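# Usage sketch (kept as a function so importing this module has no side effects):
# build the modified Shepp-Logan description and rasterise it.  The 128x128 grid
# size is an arbitrary illustrative choice.
def _demo_shepp_logan(n_points=128):
    M = my_phantomgallery('modified_shepp_logan')
    return phantom_ellipses(n_points, M)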
def phantom_squares(n_points,S):
"""
Function that create the phantom image of 'squares' type, from the matrix of the elements and given the number of pixels.
Parameters
----------
n_points: number of pixels on each row and column
S: matrix of the elements of the phantom
Returns
-------
phantom : phantom image
"""
#Rescaling according to image size
S[:,0] = S[:,0]*n_points/2
S[:,1] = S[:,1]*n_points/2
S[:,2] = S[:,2]*n_points/2
S[:,3] = S[:,3]*math.pi/180
x,y = np.meshgrid(np.arange(0,n_points)-n_points//2 ,np.arange(0,n_points)-n_points//2 )
nrow,ncol = S.shape
phantom1 = np.zeros((y.shape[0], y.shape[1], nrow))
for k in range(nrow): # iterate over the squares
x_new = x - S[k,0]
y_new = y - S[k,1]
u = abs(x_new*math.cos(S[k,3])+y_new*math.sin(S[k,3]))
v = abs(-x_new*math.sin(S[k,3])+y_new*math.cos(S[k,3]))
cond = np.maximum(u,v)
for i in range(x.shape[0]):
for j in range(x.shape[1]):
if (cond[i,j] < S[k,2]/2):
phantom1[i,j,k] = S[k,4]; # gray scale
else:
phantom1[i,j,k] = 0.0;
#endif
#endfor
#endfor
#endfor
phantom1 = phantom1.sum(axis=2)
phantom = np.flipud(phantom1)
return phantom
def phantom_rectangles(n_points,R):
"""
Function that create the phantom image of 'rectangles' type, from the matrix of the elements and given the number of pixels.
Parameters
----------
n_points: number of pixels on each row and column
R: matrix of the elements of the phantom
Returns
-------
phantom : phantom image
"""
#Rescaling according to image size
R[:,0] = R[:,0]*n_points/2
R[:,1] = R[:,1]*n_points/2
R[:,2] = R[:,2]*n_points/2
R[:,3] = R[:,3]*n_points/2
R[:,4] = R[:,4]*math.pi/180
x,y = np.meshgrid(np.arange(0,n_points)-n_points//2 ,np.arange(0,n_points)-n_points//2 )
nrow,ncol = R.shape
phantom1 = np.zeros((y.shape[0], y.shape[1], nrow))
for k in range(nrow): # iterate over the rectangles
x_new = x - R[k,0]
y_new = y - R[k,1]
u = abs(x_new*math.cos(R[k,4])+y_new*math.sin(R[k,4]))
v = abs(-x_new*math.sin(R[k,4])+y_new*math.cos(R[k,4]))
for i in range(x.shape[0]):
for j in range(x.shape[1]):
if (u[i,j] < R[k,2]/2 and v[i,j] < R[k,3]/2):
phantom1[i,j,k] = R[k,5]; # gray scale
else:
phantom1[i,j,k] = 0.0;
#endif
#endfor
#endfor
#endfor
phantom1 = phantom1.sum(axis=2)
phantom = np.flipud(phantom1)
return phantom
def my_radon_analytic(phantom_type, N, theta_vec, M , tvec_set=None, circle=False ):
"""
Function that returns the analytical_sinogram given the phantom.
Parameters
----------
phantom_type : type of the phantom ('ellipses', 'shepp_logan', 'modified_shepp_logan','squares'rectangles')
theta_vec : list of the angles
M : matrix of the structure of the phantom
tvec_set : vector of the t values given from by the user
circle : as in the function iradon of scikit-image "assume the reconstructed image is zero outside the inscribed circle. Also changes the default output_size to match the behaviour of radon called with circle=True."
Returns
-------
analytical_sinogram : Analytical Sinogram of the given phantom
"""
if phantom_type in ['ellipses', 'shepp_logan', 'modified_shepp_logan']:
analytical_sinogram = radon_ellipses(N,theta_vec,M, tvec_set,circle);
elif phantom_type== 'squares':
analytical_sinogram = radon_squares(N,theta_vec,M, circle);
elif phantom_type== 'rectangles':
analytical_sinogram = radon_rectangles(N,theta_vec,M, circle);
else:
print('error on the choice of phantom type')
#endif
return analytical_sinogram
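# Usage sketch (never executed at import time): an analytical sinogram of the
# squares phantom.  The pixel count and angle grid are illustrative assumptions,
# and the helper build_t_theta_pixel is assumed to be defined elsewhere in this
# module, as it is used by the radon_* functions below.
def _demo_analytic_sinogram(n_points=128, n_angles=90):
    theta_vec = np.linspace(0., 180., n_angles, endpoint=False)
    M = my_phantomgallery('squares')
    return my_radon_analytic('squares', n_points, theta_vec, M)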
def radon_ellipses(N,theta_vec, E, tvec_set=None, circle=False):
"""
Function that compute the analytical_sinogram for phantoms of ellipses type
Parameters
----------
N : Pixels per dimension of the image
theta_vec : vector of the angles theta
E : matrix of the ellipses parameters
tvec_set : vector of the t values given from by the user
circle : as in the function iradon of scikit-image "assume the reconstructed image is zero outside the inscribed circle. Also changes the default output_size to match the behaviour of radon called with circle=True."
Returns
-------
analytical_sinogram : Analytical Sinogram
"""
#Rescaling according to image size
E[:,0] = E[:,0]*N/2
E[:,1] = E[:,1]*N/2
E[:,2] = E[:,2]*N/2
E[:,3] = E[:,3]*N/2
E[:,4] = E[:,4]*math.pi/180
[t_vec, grid_t, grid_theta] = build_t_theta_pixel(N, theta_vec, tvec_set=tvec_set, circle =circle);
(nrowE,ncolE) = E.shape;
tmp = np.zeros((nrowE,len(grid_theta)))
for i in range(nrowE):
grid_theta_new = grid_theta - E[i,4]
x_new = (E[i,2]*np.cos(grid_theta)+E[i,3]*np.sin(grid_theta))
y_new = (-E[i,2]*np.sin(grid_theta)+E[i,3]*np.cos(grid_theta))
grid_t_new = (grid_t -x_new)/E[i,1]
v1 = np.sin(grid_theta_new)**2+((E[i,0]/E[i,1])**2)*np.cos(grid_theta_new)**2 - grid_t_new**2
cond = v1;
v2 = np.zeros((v1.shape[0],1))
for j in range (len(grid_theta)):
if cond[j] > 0:
v2[j]=1
else:
v2[j]=0
#endif
#endfor
v3 = np.sqrt(v1*v2);
v4 = np.sin(grid_theta_new)**2+((E[i,0]/E[i,1])**2)*np.cos(grid_theta_new)**2
tmp[i,:] = np.transpose( 2*E[i,0]*E[i,5]*(v3/v4) )
#endfor
radvec = np.sum(tmp,axis = 0);
analytical_sinogram = np.transpose(np.reshape(radvec,(len(theta_vec),len(t_vec))))
return analytical_sinogram
def radon_squares(N,theta_vec,S, circle=False):
"""
Function that compute the analytical_sinogram for phantoms of square type
Parameters
----------
N : dimension of the image
theta_vec : list of the angles
S : matrix of the squares parameters
circle : as in the function iradon of scikit-image "assume the reconstructed image is zero outside the inscribed circle. Also changes the default output_size to match the behaviour of radon called with circle=True."
Returns
-------
analytical_sinogram : Analytical Sinogram
"""
#Rescaling according to image size
S[:,0] = S[:,0]*N/2
S[:,1] = (S[:,1])*N/2
S[:,2] = (S[:,2])*N/2
S[:,3] = S[:,3]*math.pi/180
[t_vec, grid_t, grid_theta] = build_t_theta_pixel(N,theta_vec, circle = circle);
[nrow,ncol] = np.shape(S);
tmp = np.zeros((nrow,len(grid_theta)));
for i in range(nrow): # cycle on the elements of the phantom
grid_theta_new = grid_theta - S[i,3];
grid_t_new = (grid_t - S[i,0]* np.cos(grid_theta) - S[i,1]*np.sin(grid_theta))*2/S[i,2];
for j in range(len(grid_theta)): # angles
theta_new = grid_theta_new[j]
t_new = grid_t_new[j]
if theta_new == 0:
if abs(t_new)< 1:
v1= -1;
v2= 1;
else:
v1= 0;
v2= 0;
#endif
else:
v1= (t_new*np.cos(theta_new)-1)/np.sin(theta_new);
v2= (t_new*np.cos(theta_new)+1)/np.sin(theta_new);
#endif
if theta_new == np.pi/2:
if abs(t_new)< 1:
h1= -1;
h2= 1;
else:
h1= 0;
h2= 0;
#endif
else:
h1 = (1-t_new*np.sin(theta_new))/np.cos(theta_new);
h2 = (-1-t_new*np.sin(theta_new))/np.cos(theta_new);
#endif
vmax= np.maximum(v1,v2); # scalar values
vmin= np.minimum(v1,v2);
hmax= np.maximum(h1,h2);
hmin= np.minimum(h1,h2);
entryval= np.maximum(vmin,hmin);
exitval= | np.minimum(vmax,hmax) | numpy.minimum |
import numpy as np
import torch
import itertools
from torchvision import datasets
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
from skimage.measure import compare_psnr, compare_ssim
from skimage.restoration import denoise_nl_means, estimate_sigma
import skimage.io as sio
from glow.glow import Glow
from dcgan.dcgan import Generator
import json
import os
import warnings
warnings.filterwarnings("ignore")
def solveInpainting(args):
if args.prior == 'glow':
GlowInpaint(args)
elif args.prior == 'dcgan':
GANInpaint(args)
elif args.prior == 'glowred':
GlowREDInpaint(args)
else:
raise "prior not defined correctly"
def np_to_torch(img_np):
"""Converts image in numpy.array to torch.Tensor.
From C x W x H [0..1] to C x W x H [0..1]
"""
# return torch.from_numpy(img_np)[None, :].float().cuda()
return torch.from_numpy(img_np).float().cuda()
def torch_to_np(img_torch):
"""Converts an image in torch.Tensor format to np.array.
From 1 x C x W x H [0..1] to C x W x H [0..1]
"""
return img_torch.detach().cpu().numpy() # add [0] later
def Denoiser(d_name, sigma_f, x_f):
x = torch_to_np(x_f)
if d_name == 'nlm':
patch_kw = dict(patch_size=5, # 5x5 patches
patch_distance=6, # 13x13 search area
multichannel=True)
s0 = np.mean(estimate_sigma(x[0], multichannel=True))
s1 = np.mean(estimate_sigma(x[1], multichannel=True))
x0 = denoise_nl_means(x[0], h=s0, sigma=s0, fast_mode=False, **patch_kw)
x1 = denoise_nl_means(x[1], h=s1, sigma=s1, fast_mode=False, **patch_kw)
x = np.stack([x0, x1])
else:
raise "other denoisers not implemented"
x_f = np_to_torch(x)
return x_f
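# Usage sketch (never called): Denoiser converts the tensor to numpy, denoises
# x[0] and x[1] independently with NL-means, and converts back; the sigma_f
# argument is currently unused by the 'nlm' branch.  The channel-last shape used
# here is an illustrative assumption for skimage's multichannel NL-means, and a
# CUDA device is required because np_to_torch calls .cuda().
def _denoiser_usage_sketch():
    x_f = np_to_torch(np.random.rand(2, 64, 64, 3).astype(np.float32))
    return Denoiser('nlm', sigma_f=None, x_f=x_f)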
# Standalone scratch example of itertools.product; it mirrors the hyperparameter
# grid (gamma x alpha x beta) built in GlowREDInpaint below and is otherwise unused.
import itertools
from pprint import pprint
inputdata = [
['a', 'b', 'c'],
['d'],
['e', 'f'],
]
result = list(itertools.product(*inputdata))
def GlowREDInpaint(args):
# loopOver = zip(args.gamma)
hyperparams = [args.gamma, args.alpha, args.beta]
loopOver = list(itertools.product(*hyperparams))
for gamma, alpha, beta in loopOver:
skip_to_next = False # flag to skip to next loop if recovery is fails due to instability
n = args.size * args.size * 3
modeldir = "./trained_models/%s/glow" % args.model
test_folder = "./test_images/%s" % args.dataset
save_path = "./results/%s/%s" % (args.dataset, args.experiment)
# loading dataset
trans = transforms.Compose([transforms.Resize((args.size, args.size)), transforms.ToTensor()])
test_dataset = datasets.ImageFolder(test_folder, transform=trans)
test_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=args.batchsize, drop_last=False,
shuffle=False)
# loading glow configurations
config_path = modeldir + "/configs.json"
with open(config_path, 'r') as f:
configs = json.load(f)
# regularizor
gamma = torch.tensor(gamma, requires_grad=True, dtype=torch.float, device=args.device)
# alpha = args.alpha
# beta = args.beta
# getting test images
Original = []
Recovered = []
Masked = []
Mask = []
Residual_Curve = []
for i, data in enumerate(test_dataloader):
# getting batch of data
x_test = data[0]
x_test = x_test.clone().to(device=args.device)
n_test = x_test.size()[0]
assert n_test == args.batchsize, "please make sure that no. of images are evenly divided by batchsize"
# generate mask
mask = gen_mask(args.inpaint_method, args.size, args.mask_size)
mask = np.array([mask for i in range(n_test)])
mask = mask.reshape([n_test, 1, args.size, args.size])
mask = torch.tensor(mask, dtype=torch.float, requires_grad=False, device=args.device)
# loading glow model
glow = Glow((3, args.size, args.size),
K=configs["K"], L=configs["L"],
coupling=configs["coupling"],
n_bits_x=configs["n_bits_x"],
nn_init_last_zeros=configs["last_zeros"],
device=args.device)
glow.load_state_dict(torch.load(modeldir + "/glowmodel.pt"))
glow.eval()
# making a forward to record shapes of z's for reverse pass
_ = glow(glow.preprocess(torch.zeros_like(x_test)))
# initializing z from Gaussian
if args.init_strategy == "random":
z_sampled = np.random.normal(0, args.init_std, [n_test, n])
z_sampled = torch.tensor(z_sampled, requires_grad=True, dtype=torch.float, device=args.device)
# initializing z from image with noise filled only in masked region
elif args.init_strategy == "noisy_filled":
x_noisy_filled = x_test.clone().detach()
noise = np.random.normal(0, 0.2, x_noisy_filled.size())
noise = torch.tensor(noise, dtype=torch.float, device=args.device)
noise = noise * (1 - mask)
x_noisy_filled = x_noisy_filled + noise
x_noisy_filled = torch.clamp(x_noisy_filled, 0, 1)
z, _, _ = glow(x_noisy_filled - 0.5)
z = glow.flatten_z(z).clone().detach()
z_sampled = z.clone().detach().requires_grad_(True)
# initializing z from image with masked region inverted
elif args.init_strategy == "inverted_filled":
x_inverted_filled = x_test.clone().detach()
missing_x = x_inverted_filled.clone()
missing_x = missing_x.data.cpu().numpy()
missing_x = missing_x[:, :, ::-1, ::-1]
missing_x = torch.tensor(missing_x.copy(), dtype=torch.float, device=args.device)
missing_x = (1 - mask) * missing_x
x_inverted_filled = x_inverted_filled * mask
x_inverted_filled = x_inverted_filled + missing_x
z, _, _ = glow(x_inverted_filled - 0.5)
z = glow.flatten_z(z).clone().detach()
z_sampled = z.clone().detach().requires_grad_(True)
# initializing z from masked image ( masked region as zeros )
elif args.init_strategy == "black_filled":
x_black_filled = x_test.clone().detach()
x_black_filled = mask * x_black_filled
x_black_filled = x_black_filled * mask
z, _, _ = glow(x_black_filled - 0.5)
z = glow.flatten_z(z).clone().detach()
z_sampled = z.clone().detach().requires_grad_(True)
# initializing z from noisy complete image
elif args.init_strategy == "noisy":
x_noisy = x_test.clone().detach()
noise = np.random.normal(0, 0.05, x_noisy.size())
noise = torch.tensor(noise, dtype=torch.float, device=args.device)
x_noisy = x_noisy + noise
x_noisy = torch.clamp(x_noisy, 0, 1)
z, _, _ = glow(x_noisy - 0.5)
z = glow.flatten_z(z).clone().detach()
z_sampled = z.clone().detach().requires_grad_(True)
# initializing z from image with only noise in masked region
elif args.init_strategy == "only_noise_filled":
x_noisy_filled = x_test.clone().detach()
noise = np.random.normal(0, 0.2, x_noisy_filled.size())
noise = torch.tensor(noise, dtype=torch.float, device=args.device)
noise = noise * (1 - mask)
x_noisy_filled = mask * x_noisy_filled + noise
x_noisy_filled = torch.clamp(x_noisy_filled, 0, 1)
z, _, _ = glow(x_noisy_filled - 0.5)
z = glow.flatten_z(z).clone().detach()
z_sampled = z.clone().detach().requires_grad_(True)
else:
raise "Initialization strategy not defined"
# selecting optimizer
if args.optim == "adam":
optimizer = torch.optim.Adam([z_sampled], lr=args.lr, )
elif args.optim == "lbfgs":
optimizer = torch.optim.LBFGS([z_sampled], lr=args.lr, )
# metrics to record over training
psnr_t = torch.nn.MSELoss().to(device=args.device)
residual = []
x_f = (x_test * mask).clone()
u = torch.zeros_like(x_test)
# running optimizer steps
for t in range(args.steps):
def closure():
optimizer.zero_grad()
z_unflat = glow.unflatten_z(z_sampled, clone=False)
x_gen = glow(z_unflat, reverse=True, reverse_clone=False)
x_gen = glow.postprocess(x_gen, floor_clamp=False)
x_masked_test = x_test * mask
x_masked_gen = x_gen * mask
global residual_t
residual_t = ((x_masked_gen - x_masked_test) ** 2).view(len(x_masked_test), -1).sum(dim=1).mean()
z_reg_loss_t = gamma * z_sampled.norm(dim=1).mean()
residual_x = beta * ((x_gen - (x_f - u)) ** 2).view(len(x_gen), -1).sum(dim=1).mean()
loss_t = residual_t + z_reg_loss_t + residual_x
psnr = psnr_t(x_test, x_gen)
psnr = 10 * np.log10(1 / psnr.item())
print("\rAt step=%0.3d|loss=%0.4f|residual_t=%0.4f|residual_x=%0.4f|z_reg=%0.5f|psnr=%0.3f" % (
t, loss_t.item(), residual_t.item(), residual_x.item(), z_reg_loss_t.item(), psnr), end="\r")
loss_t.backward()
return loss_t
def denoiser_step(x_f, u):
z_unflat = glow.unflatten_z(z_sampled, clone=False)
x_gen = glow(z_unflat, reverse=True, reverse_clone=False).detach()
x_gen = glow.postprocess(x_gen, floor_clamp=False)
x_f = 1 / (beta + alpha) * (beta * Denoiser(args.denoiser, args.sigma_f, x_f) + alpha * (x_gen + u))
u = u + x_gen - x_f
return x_f, u
optimizer.step(closure)
residual.append(residual_t.item())
if t % args.update_iter == args.update_iter - 1:
x_f, u = denoiser_step(x_f, u)
# try:
# optimizer.step(closure)
# residual.append(residual_t.item())
# if t % args.update_iter == 0:
# x_f, u = denoiser_step(x_f, u)
#
# except:
# skip_to_next = True
# break
if skip_to_next:
break
# getting recovered and true images
x_test_np = x_test.data.cpu().numpy().transpose(0, 2, 3, 1)
z_unflat = glow.unflatten_z(z_sampled, clone=False)
x_gen = glow(z_unflat, reverse=True, reverse_clone=False)
x_gen = glow.postprocess(x_gen, floor_clamp=False)
x_gen_np = x_gen.data.cpu().numpy().transpose(0, 2, 3, 1)
x_gen_np = np.clip(x_gen_np, 0, 1)
mask_np = mask.data.cpu().numpy()
x_masked_test = x_test * mask
x_masked_test_np = x_masked_test.data.cpu().numpy().transpose(0, 2, 3, 1)
x_masked_test_np = np.clip(x_masked_test_np, 0, 1)
Original.append(x_test_np)
Recovered.append(x_gen_np)
Masked.append(x_masked_test_np)
Residual_Curve.append(residual)
Mask.append(mask_np)
# freeing up memory for second loop
glow.zero_grad()
optimizer.zero_grad()
del x_test, x_gen, optimizer, psnr_t, z_sampled, glow, mask,
torch.cuda.empty_cache()
print("\nbatch completed")
if skip_to_next:
print("\nskipping current loop due to instability or user triggered quit")
continue
# metric evaluations
Original = np.vstack(Original)
Recovered = np.vstack(Recovered)
Masked = np.vstack(Masked)
Mask = np.vstack(Mask)
psnr = [compare_psnr(x, y) for x, y in zip(Original, Recovered)]
# print performance analysis
printout = "+-" * 10 + "%s" % args.dataset + "-+" * 10 + "\n"
printout = printout + "\t n_test = %d\n" % len(Recovered)
printout = printout + "\t inpaint_method = %s\n" % args.inpaint_method
printout = printout + "\t mask_size = %0.3f\n" % args.mask_size
printout = printout + "\t update_iter = %0.4f\n" % args.update_iter
printout = printout + "\t gamma = %0.6f\n" % gamma
printout = printout + "\t alpha = %0.6f\n" % alpha
printout = printout + "\t beta = %0.6f\n" % beta
printout = printout + "\t PSNR = %0.3f\n" % np.mean(psnr)
print(printout)
if args.save_metrics_text:
with open("%s_inpaint_glow_results.txt" % args.dataset, "a") as f:
f.write('\n' + printout)
# saving images
if args.save_results:
gamma = gamma.item()
file_names = [name[0].split("/")[-1].split(".")[0] for name in test_dataset.samples]
if args.init_strategy == 'random':
save_path = save_path + "/inpaint_%s_masksize_%0.4f_updateiter_%0.4f_gamma_%0.6f_alpha_%0.6f_beta_%0.6f_steps_%d_lr_%0.3f_init_std_%0.2f_optim_%s"
save_path = save_path % (
args.inpaint_method, args.mask_size, args.update_iter, gamma, alpha, beta, args.steps, args.lr, args.init_std, args.optim)
else:
save_path = save_path + "/inpaint_%s_masksize_%0.4f_updateiter_%0.4f_gamma_%0.6f_alpha_%0.6f_beta_%0.6f_steps_%d_lr_%0.3f_init_std_%0.2f_optim_%s"
save_path = save_path % (
args.inpaint_method, args.mask_size, args.update_iter, gamma, alpha, beta, args.steps, args.lr, args.init_strategy, args.optim)
if not os.path.exists(save_path):
os.makedirs(save_path)
else:
save_path_1 = save_path + "_1"
if not os.path.exists(save_path_1):
os.makedirs(save_path_1)
save_path = save_path_1
else:
save_path_2 = save_path + "_2"
if not os.path.exists(save_path_2):
os.makedirs(save_path_2)
save_path = save_path_2
_ = [sio.imsave(save_path + "/" + name + "_recov.jpg", x) for x, name in zip(Recovered, file_names)]
_ = [sio.imsave(save_path + "/" + name + "_masked.jpg", x) for x, name in zip(Masked, file_names)]
Residual_Curve = np.array(Residual_Curve).mean(axis=0)
np.save(save_path + "/" + "residual_curve.npy", Residual_Curve)
np.save(save_path + "/original.npy", Original)
np.save(save_path + "/recovered.npy", Recovered)
np.save(save_path + "/mask.npy", Mask)
np.save(save_path + "/masked.npy", Masked)
def GlowInpaint(args):
loopOver = zip(args.gamma)
for gamma in loopOver:
        skip_to_next = False  # flag to skip to the next loop if recovery fails due to instability
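        # dimensionality of the flattened Glow latent (equal to the image size: 3 * size * size)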
n = args.size*args.size*3
modeldir = "./trained_models/%s/glow"%args.model
test_folder = "./test_images/%s"%args.dataset
save_path = "./results/%s/%s"%(args.dataset,args.experiment)
# loading dataset
trans = transforms.Compose([transforms.Resize((args.size,args.size)),transforms.ToTensor()])
test_dataset = datasets.ImageFolder(test_folder, transform=trans)
test_dataloader = torch.utils.data.DataLoader(test_dataset,batch_size=args.batchsize,drop_last=False,shuffle=False)
# loading glow configurations
config_path = modeldir+"/configs.json"
with open(config_path, 'r') as f:
configs = json.load(f)
        # regularizer
gamma = torch.tensor(gamma, requires_grad=True, dtype=torch.float, device=args.device)
# getting test images
Original = []
Recovered = []
Masked = []
Mask = []
Residual_Curve = []
for i, data in enumerate(test_dataloader):
# getting batch of data
x_test = data[0]
x_test = x_test.clone().to(device=args.device)
n_test = x_test.size()[0]
            assert n_test == args.batchsize, "please make sure the number of test images is evenly divisible by the batch size"
# generate mask
mask = gen_mask(args.inpaint_method,args.size,args.mask_size)
mask = np.array([mask for i in range(n_test)])
mask = mask.reshape([n_test,1,args.size,args.size])
mask = torch.tensor(mask, dtype=torch.float, requires_grad=False, device=args.device)
# loading glow model
glow = Glow((3,args.size,args.size),
K=configs["K"],L=configs["L"],
coupling=configs["coupling"],
n_bits_x=configs["n_bits_x"],
nn_init_last_zeros=configs["last_zeros"],
device=args.device)
glow.load_state_dict(torch.load(modeldir+"/glowmodel.pt"))
glow.eval()
# making a forward to record shapes of z's for reverse pass
_ = glow(glow.preprocess(torch.zeros_like(x_test)))
# initializing z from Gaussian
if args.init_strategy == "random":
z_sampled = np.random.normal(0,args.init_std,[n_test,n])
z_sampled = torch.tensor(z_sampled,requires_grad=True,dtype=torch.float,device=args.device)
# initializing z from image with noise filled only in masked region
elif args.init_strategy == "noisy_filled":
x_noisy_filled = x_test.clone().detach()
noise = np.random.normal(0,0.2, x_noisy_filled.size())
noise = torch.tensor(noise,dtype=torch.float,device=args.device)
noise = noise * (1-mask)
x_noisy_filled = x_noisy_filled + noise
x_noisy_filled = torch.clamp(x_noisy_filled, 0, 1)
z, _, _ = glow(x_noisy_filled - 0.5)
z = glow.flatten_z(z).clone().detach()
z_sampled = z.clone().detach().requires_grad_(True)
# initializing z from image with masked region inverted
elif args.init_strategy == "inverted_filled":
x_inverted_filled = x_test.clone().detach()
missing_x = x_inverted_filled.clone()
missing_x = missing_x.data.cpu().numpy()
missing_x = missing_x[:,:,::-1,::-1]
missing_x = torch.tensor(missing_x.copy(),dtype=torch.float,device=args.device)
missing_x = (1-mask)*missing_x
x_inverted_filled = x_inverted_filled * mask
x_inverted_filled = x_inverted_filled + missing_x
z, _, _ = glow(x_inverted_filled - 0.5)
z = glow.flatten_z(z).clone().detach()
z_sampled = z.clone().detach().requires_grad_(True)
# initializing z from masked image ( masked region as zeros )
elif args.init_strategy == "black_filled":
x_black_filled = x_test.clone().detach()
x_black_filled = mask * x_black_filled
x_black_filled = x_black_filled * mask
z, _, _ = glow(x_black_filled - 0.5)
z = glow.flatten_z(z).clone().detach()
z_sampled = z.clone().detach().requires_grad_(True)
# initializing z from noisy complete image
elif args.init_strategy == "noisy":
x_noisy = x_test.clone().detach()
noise = np.random.normal(0,0.05, x_noisy.size())
noise = torch.tensor(noise,dtype=torch.float,device=args.device)
x_noisy = x_noisy + noise
x_noisy = torch.clamp(x_noisy, 0, 1)
z, _, _ = glow(x_noisy - 0.5)
z = glow.flatten_z(z).clone().detach()
z_sampled = z.clone().detach().requires_grad_(True)
# initializing z from image with only noise in masked region
elif args.init_strategy == "only_noise_filled":
x_noisy_filled = x_test.clone().detach()
noise = np.random.normal(0,0.2, x_noisy_filled.size())
noise = torch.tensor(noise,dtype=torch.float,device=args.device)
noise = noise * (1-mask)
x_noisy_filled = mask * x_noisy_filled + noise
x_noisy_filled = torch.clamp(x_noisy_filled, 0, 1)
z, _, _ = glow(x_noisy_filled - 0.5)
z = glow.flatten_z(z).clone().detach()
z_sampled = z.clone().detach().requires_grad_(True)
else:
raise "Initialization strategy not defined"
# selecting optimizer
if args.optim == "adam":
optimizer = torch.optim.Adam([z_sampled], lr=args.lr,)
elif args.optim == "lbfgs":
optimizer = torch.optim.LBFGS([z_sampled], lr=args.lr,)
# metrics to record over training
psnr_t = torch.nn.MSELoss().to(device=args.device)
residual = []
# running optimizer steps
for t in range(args.steps):
def closure():
optimizer.zero_grad()
z_unflat = glow.unflatten_z(z_sampled, clone=False)
x_gen = glow(z_unflat, reverse=True, reverse_clone=False)
x_gen = glow.postprocess(x_gen,floor_clamp=False)
x_masked_test = x_test * mask
x_masked_gen = x_gen * mask
global residual_t
residual_t = ((x_masked_gen - x_masked_test)**2).view(len(x_masked_test),-1).sum(dim=1).mean()
z_reg_loss_t= gamma*z_sampled.norm(dim=1).mean()
loss_t = residual_t + z_reg_loss_t
psnr = psnr_t(x_test, x_gen)
psnr = 10 * np.log10(1 / psnr.item())
print("\rAt step=%0.3d|loss=%0.4f|residual=%0.4f|z_reg=%0.5f|psnr=%0.3f"%(t,loss_t.item(),residual_t.item(),z_reg_loss_t.item(), psnr),end="\r")
loss_t.backward()
return loss_t
optimizer.step(closure)
residual.append(residual_t.item())
# try:
# optimizer.step(closure)
# residual.append(residual_t.item())
# except:
# skip_to_next = True
# break
if skip_to_next:
break
# getting recovered and true images
x_test_np = x_test.data.cpu().numpy().transpose(0,2,3,1)
z_unflat = glow.unflatten_z(z_sampled, clone=False)
x_gen = glow(z_unflat, reverse=True, reverse_clone=False)
x_gen = glow.postprocess(x_gen,floor_clamp=False)
x_gen_np = x_gen.data.cpu().numpy().transpose(0,2,3,1)
x_gen_np = np.clip(x_gen_np,0,1)
mask_np = mask.data.cpu().numpy()
x_masked_test = x_test * mask
x_masked_test_np = x_masked_test.data.cpu().numpy().transpose(0,2,3,1)
x_masked_test_np = np.clip(x_masked_test_np,0,1)
Original.append(x_test_np)
Recovered.append(x_gen_np)
Masked.append(x_masked_test_np)
Residual_Curve.append(residual)
Mask.append(mask_np)
# freeing up memory for second loop
glow.zero_grad()
optimizer.zero_grad()
del x_test, x_gen, optimizer, psnr_t, z_sampled, glow, mask,
torch.cuda.empty_cache()
print("\nbatch completed")
if skip_to_next:
print("\nskipping current loop due to instability or user triggered quit")
continue
# metric evaluations
Original = np.vstack(Original)
Recovered = np.vstack(Recovered)
Masked = np.vstack(Masked)
Mask = np.vstack(Mask)
psnr = [compare_psnr(x, y) for x,y in zip(Original, Recovered)]
# print performance analysis
printout = "+-"*10 + "%s"%args.dataset + "-+"*10 + "\n"
printout = printout + "\t n_test = %d\n"%len(Recovered)
printout = printout + "\t inpaint_method = %s\n"%args.inpaint_method
printout = printout + "\t mask_size = %0.3f\n"%args.mask_size
printout = printout + "\t gamma = %0.6f\n"%gamma
printout = printout + "\t PSNR = %0.3f\n"%np.mean(psnr)
print(printout)
if args.save_metrics_text:
with open("%s_inpaint_glow_results.txt"%args.dataset,"a") as f:
f.write('\n' + printout)
# saving images
if args.save_results:
gamma = gamma.item()
file_names = [name[0].split("/")[-1].split(".")[0] for name in test_dataset.samples]
if args.init_strategy == 'random':
save_path = save_path + "/inpaint_%s_masksize_%0.4f_gamma_%0.6f_steps_%d_lr_%0.3f_init_std_%0.2f_optim_%s"
save_path = save_path%(args.inpaint_method,args.mask_size,gamma,args.steps,args.lr,args.init_std,args.optim)
else:
save_path = save_path + "/inpaint_%s_masksize_%0.4f_gamma_%0.6f_steps_%d_lr_%0.3f_init_%s_optim_%s"
save_path = save_path%(args.inpaint_method,args.mask_size,gamma,args.steps,args.lr,args.init_strategy,args.optim)
if not os.path.exists(save_path):
os.makedirs(save_path)
else:
save_path_1 = save_path + "_1"
if not os.path.exists(save_path_1):
os.makedirs(save_path_1)
save_path = save_path_1
else:
save_path_2 = save_path + "_2"
if not os.path.exists(save_path_2):
os.makedirs(save_path_2)
save_path = save_path_2
_ = [sio.imsave(save_path+"/"+name+"_recov.jpg", x) for x,name in zip(Recovered,file_names)]
_ = [sio.imsave(save_path+"/"+name+"_masked.jpg", x) for x,name in zip(Masked,file_names)]
Residual_Curve = np.array(Residual_Curve).mean(axis=0)
np.save(save_path+"/"+"residual_curve.npy", Residual_Curve)
np.save(save_path+"/original.npy", Original)
np.save(save_path+"/recovered.npy", Recovered)
np.save(save_path+"/mask.npy", Mask)
np.save(save_path+"/masked.npy", Masked)
def GANInpaint(args):
loopOver = zip(args.gamma)
for gamma in loopOver:
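        # latent dimension of the DCGAN generator (z is sampled with shape [batch, n, 1, 1])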
n = 100
modeldir = "./trained_models/%s/dcgan"%args.model
test_folder = "./test_images/%s"%args.dataset
save_path = "./results/%s/%s"%(args.dataset,args.experiment)
# loading dataset
trans = transforms.Compose([transforms.Resize((args.size,args.size)),transforms.ToTensor()])
test_dataset = datasets.ImageFolder(test_folder, transform=trans)
test_dataloader = torch.utils.data.DataLoader(test_dataset,batch_size=args.batchsize,drop_last=False,shuffle=False)
        # regularizer
gamma = torch.tensor(gamma, requires_grad=True, dtype=torch.float, device=args.device)
# getting test images
Original = []
Recovered = []
Masked = []
Mask = []
Residual_Curve = []
for i, data in enumerate(test_dataloader):
# getting batch of data
x_test = data[0]
x_test = x_test.clone().to(device=args.device)
n_test = x_test.size()[0]
            assert n_test == args.batchsize, "please make sure the number of test images is evenly divisible by the batch size"
# generate mask
mask = gen_mask(args.inpaint_method,args.size,args.mask_size)
mask = np.array([mask for i in range(n_test)])
mask = mask.reshape([n_test,1,args.size,args.size])
mask = torch.tensor(mask,dtype=torch.float,requires_grad=False, device=args.device)
# loading dcgan model
generator = Generator(ngpu=1).to(device=args.device)
generator.load_state_dict(torch.load(modeldir+'/dcgan_G.pt'))
generator.eval()
# initializing latent code z from Gaussian
if args.init_strategy == "random":
                z_sampled = np.random.normal(0,args.init_std,[n_test,n,1,1])
#!/usr/bin/env python3
"""
copied from https://github.com/wleftwich/aoc2020/blob/main/29_jurassic_jigsaw.py, under MIT license
"""
import math
from collections import defaultdict, Counter
import sys
import numpy as np
from scipy import ndimage
datafile = sys.argv[1] if len(sys.argv) > 1 else "input1"
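# neighbour offsets for North, East, South and West, encoded as complex numbers on the grid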
NESW = [0+1j, 1+0j, 0-1j, -1+0j]
def parse_data(txt):
recs = (y for y in (x.strip() for x in txt.split('\n\n')) if y)
return [parse_rec(rec) for rec in recs]
def parse_rec(rec):
lines = [y for y in (x.strip() for x in rec.split('\n')) if y]
label = lines[0].split()[1][:-1]
tile = [
[int(x) for x in line.replace('#', '1').replace('.', '0')]
for line in lines[1:]
]
return (label, np.array(tile))
def tile_orientations(tile):
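    """Yield all 8 orientations of a tile: 4 rotations, each also flipped left-right."""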
for r in range(4):
a = np.rot90(tile, r)
yield a
        yield np.fliplr(a)
"""
.. module:: cnn_train
:synopsis: Example nuts-ml pipeline for training a MLP on MNIST
"""
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.optim as optim
import nutsflow as nf
import nutsml as nm
import numpy as np
from nutsml.network import PytorchNetwork
from utils import download_mnist, load_mnist
class Model(nn.Module):
"""Pytorch model"""
def __init__(self, device):
"""Construct model on given device, e.g. 'cpu' or 'cuda'"""
super(Model, self).__init__()
self.fc1 = nn.Linear(28 * 28, 500)
self.fc2 = nn.Linear(500, 256)
self.fc3 = nn.Linear(256, 10)
self.to(device) # set device before constructing optimizer
# required properties of a model to be wrapped as PytorchNetwork!
self.device = device # 'cuda', 'cuda:0' or 'gpu'
self.losses = nn.CrossEntropyLoss() # can be list of loss functions
self.optimizer = optim.Adam(self.parameters())
def forward(self, x):
"""Forward pass through network for input x"""
x = x.view(-1, 28 * 28)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def accuracy(y_true, y_pred):
"""Compute accuracy"""
from sklearn.metrics import accuracy_score
y_pred = [yp.argmax() for yp in y_pred]
return 100 * accuracy_score(y_true, y_pred)
def evaluate(network, x, y):
"""Evaluate network performance (here accuracy)"""
metrics = [accuracy]
build_batch = (nm.BuildBatch(64)
.input(0, 'vector', 'float32')
.output(1, 'number', 'int64'))
acc = zip(x, y) >> build_batch >> network.evaluate(metrics)
return acc
def train(network, epochs=3):
"""Train network for given number of epochs"""
print('loading data...')
filepath = download_mnist()
x_train, y_train, x_test, y_test = load_mnist(filepath)
plot = nm.PlotLines(None, every_sec=0.2)
build_batch = (nm.BuildBatch(128)
.input(0, 'vector', 'float32')
.output(1, 'number', 'int64'))
for epoch in range(epochs):
print('epoch', epoch + 1)
losses = (zip(x_train, y_train) >> nf.PrintProgress(x_train) >>
nf.Shuffle(1000) >> build_batch >>
network.train() >> plot >> nf.Collect())
acc_test = evaluate(network, x_test, y_test)
acc_train = evaluate(network, x_train, y_train)
        print('train loss : {:.6f}'.format(np.mean(losses)))
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 28 11:46:09 2020
@author: <NAME>
"""
import pickle
import numpy as np
import numpy.linalg as la
from numpy.random import default_rng
# Create the random number generator
rng = default_rng()
class Organization(object):
"""Defines a class Organization which contains an organization network
structure (a.k.a. an organizational form) populated with agents."""
def __init__(self, struct="tree"):
"""Creates an instance of class Organization with a specified structure
and corresponding parameters for that structure. The default is a
standard tree organizational form.
Parameters
----------
struct : STRING, optional
Defines the form or structure of the organization. The
default is "tree".
pops : Population, required
One or more populations provided to the organization in an array of
populations.
Returns
-------
None.
"""
# Set org structure
self.struct = struct
# Create network graph of organization
if self.struct == "tree":
# Load organization, parents, and siblings from file
self.org = pickle.load(open("cliquetree_org.pickle","rb"))
self.A_pars = pickle.load(open("cliquetree_parents.pickle","rb"))
self.A_sibs = pickle.load(open("cliquetree_siblings.pickle","rb"))
# Define other relationships
self.A_gpars = np.matmul(self.A_pars,self.A_pars)
self.A_kids = np.transpose(self.A_pars)
self.A_gkids = np.matmul(self.A_kids,self.A_kids)
# Correct grandparent relationship for those without grandparents
self.A_gpars[0:6,0] = np.ones((6))
else:
print("Input 'struct' for 'Organization' is not valid.")
"""Population Variables"""
self.pops = [] # List of populations for the org
self.from_pop = [] # Array of populations that current employees are from
"""Network Count Parameters"""
# For nodes, parents, grandparents, siblings,kids, and grandkids. No
# values are allowed to be zero because they're mostly used as
# divisors and the matrices will be zero in those cases.
self.n_nodes = len(self.org.nodes())
self.id = np.identity(self.n_nodes)
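        # norm_* are diagonal matrices holding 1 / (relation count per node); the boolean
        # term added to the divisor guards against division by zero for nodes with no such relation.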
self.norm_pars = np.divide(self.id,np.sum(self.A_pars,axis=1) \
+ np.array(np.sum(self.A_pars,axis=1) == 0))
self.norm_gpars = np.divide(self.id,np.sum(self.A_gpars,axis=1) \
+ np.array(np.sum(self.A_gpars,axis=1) == 0))
self.norm_sibs = np.divide(self.id,np.sum(self.A_sibs,axis=1) \
+ np.array(np.sum(self.A_sibs,axis=1) == 0))
self.norm_kids = np.divide(self.id,np.sum(self.A_kids,axis=1) \
+ np.array(np.sum(self.A_kids,axis=1) == 0))
self.norm_gkids = np.divide(self.id,np.sum(self.A_gkids,axis=1) \
+ np.array(np.sum(self.A_gkids,axis=1) == 0))
"""Unit Vectors"""
self.unit_x = np.array([1,0,0])
        self.unit_y = np.array([0,1,0])
import argparse
import json
import logging
from os.path import join, exists
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from debias.datasets import triviaqa_cp
from debias.models.model_dir import ModelDir
from debias.utils import py_utils, ops
from triviaqa_cp import triviaqa_cp_evaluation
def get_cache_name(dataset_name, part_name):
return dataset_name + "-" + part_name
def get_predictions(path, dataset_name, part, bach_size=128, sample=None, cache=True):
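    """Compute model predictions for a TriviaQA-CP part, reusing a cached JSON file
    when one exists and no sampling is requested."""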
output_file = join(path, "%s-predictions.json" % get_cache_name(dataset_name, part))
if sample is None and cache and exists(output_file):
return py_utils.load_json(output_file)
logging.info("Computing predictions for %s on %s..." % (path, dataset_name))
logging.info("Loading model...")
model_dir = ModelDir(path)
model = model_dir.get_model()
logging.info("Setup data...")
data = triviaqa_cp.load_annotated_triviaqa_cp(dataset_name, part)
data.sort(key=lambda x: len(x.tokens))
voc = triviaqa_cp.compute_voc(data)
model.set_vocab(voc)
tuples = triviaqa_cp.convert_to_tuples(data)
if sample is not None:
        np.random.shuffle(tuples)
"""
Test Surrogates Overview
========================
"""
# Author: <NAME> <<EMAIL>>
# License: new BSD
from PIL import Image
import numpy as np
import scripts.surrogates_overview as exo
import scripts.image_classifier as imgclf
import sklearn.datasets
import sklearn.linear_model
SAMPLES = 10
BATCH = 50
SAMPLE_IRIS = False
IRIS_SAMPLES = 50000
def test_bilmey_image():
"""Tests surrogate image bLIMEy."""
# Load the image
doggo_img = Image.open('surrogates_overview/img/doggo.jpg')
doggo_array = np.array(doggo_img)
# Load the classifier
clf = imgclf.ImageClassifier()
explain_classes = [('tennis ball', 852),
('golden retriever', 207),
('Labrador retriever', 208)]
# Configure widgets to select occlusion colour, segmentation granularity
# and explained class
colour_selection = {
i: i for i in ['mean', 'black', 'white', 'randomise-patch', 'green']
}
granularity_selection = {'low': 13, 'medium': 30, 'high': 50}
# Generate explanations
blimey_image_collection = {}
for gran_name, gran_number in granularity_selection.items():
blimey_image_collection[gran_name] = {}
for col_name in colour_selection:
blimey_image_collection[gran_name][col_name] = \
exo.build_image_blimey(
doggo_array,
clf.predict_proba,
explain_classes,
explanation_size=5,
segments_number=gran_number,
occlusion_colour=col_name,
samples_number=SAMPLES,
batch_size=BATCH,
random_seed=42)
exp = []
for gran_ in blimey_image_collection:
for col_ in blimey_image_collection[gran_]:
exp.append(blimey_image_collection[gran_][col_]['surrogates'])
assert len(exp) == len(EXP_IMG)
for e, E in zip(exp, EXP_IMG):
assert sorted(list(e.keys())) == sorted(list(E.keys()))
for key in e.keys():
assert e[key]['name'] == E[key]['name']
assert len(e[key]['explanation']) == len(E[key]['explanation'])
for e_, E_ in zip(e[key]['explanation'], E[key]['explanation']):
assert e_[0] == E_[0]
assert np.allclose(e_[1], E_[1], atol=.001, equal_nan=True)
def test_bilmey_tabular():
"""Tests surrogate tabular bLIMEy."""
# Load the iris data set
iris = sklearn.datasets.load_iris()
iris_X = iris.data # [:, :2] # take the first two features only
iris_y = iris.target
iris_labels = iris.target_names
iris_feature_names = iris.feature_names
label2class = {lab: i for i, lab in enumerate(iris_labels)}
# Fit the classifier
logreg = sklearn.linear_model.LogisticRegression(C=1e5)
logreg.fit(iris_X, iris_y)
# explained class
_dtype = iris_X.dtype
explained_instances = {
'setosa': np.array([5, 3.5, 1.5, 0.25]).astype(_dtype),
'versicolor': np.array([5.5, 2.75, 4.5, 1.25]).astype(_dtype),
'virginica': np.array([7, 3, 5.5, 2.25]).astype(_dtype)
}
petal_length_idx = iris_feature_names.index('petal length (cm)')
petal_length_bins = [1, 2, 3, 4, 5, 6, 7]
petal_width_idx = iris_feature_names.index('petal width (cm)')
petal_width_bins = [0, .5, 1, 1.5, 2, 2.5]
discs_ = []
for i, ix in enumerate(petal_length_bins): # X-axis
for iix in petal_length_bins[i + 1:]:
for j, jy in enumerate(petal_width_bins): # Y-axis
for jjy in petal_width_bins[j + 1:]:
discs_.append({
petal_length_idx: [ix, iix],
petal_width_idx: [jy, jjy]
})
for inst_i in explained_instances:
for cls_i in iris_labels:
for disc_i, disc in enumerate(discs_):
inst = explained_instances[inst_i]
cls = label2class[cls_i]
exp = exo.build_tabular_blimey(
inst, cls, iris_X, iris_y, logreg.predict_proba, disc,
IRIS_SAMPLES, SAMPLE_IRIS, 42)
key = '{}&{}&{}'.format(inst_i, cls, disc_i)
exp_ = EXP_TAB[key]
assert exp['explanation'].shape[0] == exp_.shape[0]
assert np.allclose(
exp['explanation'], exp_, atol=.001, equal_nan=True)
EXP_IMG = [
{207: {'explanation': [(13, -0.24406872165780585),
(11, -0.20456180387430317),
(9, -0.1866779131424261),
(4, 0.15001224157793785),
(3, 0.11589480417160983)],
'name': 'golden retriever'},
208: {'explanation': [(13, -0.08395966359346249),
(0, -0.0644986107387837),
(9, 0.05845584633658977),
(1, 0.04369763085720947),
(11, -0.035958188394941866)],
'name': '<NAME>'},
852: {'explanation': [(13, 0.3463529698715463),
(11, 0.2678050131923326),
(4, -0.10639863421417416),
(6, 0.08345792378117327),
(9, 0.07366945242386444)],
'name': '<NAME>'}},
{207: {'explanation': [(13, -0.0624167912596456),
(7, 0.06083359545295548),
(3, 0.0495953943686462),
(11, -0.04819787147412231),
(2, -0.03858823761391199)],
'name': '<NAME>'},
208: {'explanation': [(13, -0.08408428146916162),
(7, 0.07704235920590158),
(3, 0.06646468388122273),
(11, -0.0638326572126609),
(2, -0.052621478002380796)],
'name': '<NAME>'},
852: {'explanation': [(11, 0.35248212611685886),
(13, 0.2516925608037859),
(2, 0.13682853028454384),
(9, 0.12930134856644754),
(6, 0.1257747954095489)],
'name': '<NAME>'}},
{207: {'explanation': [(3, 0.21351937934930917),
(10, 0.16933456312772083),
(11, -0.13447244552856766),
(8, 0.11058919217055371),
(2, -0.06269239798368743)],
'name': '<NAME>'},
208: {'explanation': [(8, 0.05995551486884414),
(9, -0.05375302972380482),
(11, -0.051997353324246445),
(6, 0.04213181405953071),
(2, -0.039169895361928275)],
'name': '<NAME>'},
852: {'explanation': [(7, 0.31382219776986503),
(11, 0.24126214884275987),
(13, 0.21075924370226598),
(2, 0.11937652039885377),
(8, -0.11911265319329697)],
'name': '<NAME>'}},
{207: {'explanation': [(3, 0.39254403293049134),
(9, 0.19357165018747347),
(6, 0.16592079671652987),
(0, 0.14042059731407297),
(1, 0.09793027079765507)],
'name': '<NAME>'},
208: {'explanation': [(9, -0.19351859273276703),
(1, -0.15262967987262344),
(3, 0.12205127112235375),
(2, 0.11352141032313934),
(6, -0.11164209893429898)],
'name': '<NAME>'},
852: {'explanation': [(7, 0.17213007100844877),
(0, -0.1583030948868859),
(3, -0.13748574615069775),
(5, 0.13273283867075436),
(11, 0.12309551170070354)],
'name': '<NAME>'}},
{207: {'explanation': [(3, 0.4073533182995105),
(10, 0.20711667988142463),
(8, 0.15360813290032324),
(6, 0.1405424759832785),
(1, 0.1332920685413575)],
'name': '<NAME>'},
208: {'explanation': [(9, -0.14747910525112617),
(1, -0.13977061235228924),
(2, 0.10526833898161611),
(6, -0.10416022118399552),
(3, 0.09555992655161764)],
'name': '<NAME>'},
852: {'explanation': [(11, 0.2232260929107954),
(7, 0.21638443149433054),
(5, 0.21100464215582274),
(13, 0.145614853795006),
(1, -0.11416523431311262)],
'name': '<NAME>'}},
{207: {'explanation': [(1, 0.14700178977744183),
(0, 0.10346667279328238),
(2, 0.10346667279328238),
(7, 0.10346667279328238),
(8, 0.10162900633690726)],
'name': '<NAME>'},
208: {'explanation': [(10, -0.10845134816658476),
(8, -0.1026920429226184),
(6, -0.10238154733842847),
(18, 0.10094164937411244),
(16, 0.08646888450232793)],
'name': '<NAME>'},
852: {'explanation': [(18, -0.20542297091894474),
(13, 0.2012751176130666),
(8, -0.19194747162742365),
(20, 0.14686930696710473),
(15, 0.11796990086271067)],
'name': '<NAME>'}},
{207: {'explanation': [(13, 0.12446259821701779),
(17, 0.11859084421095789),
(15, 0.09690553833007137),
(12, -0.08869743701731962),
(4, 0.08124900427893789)],
'name': '<NAME>'},
208: {'explanation': [(10, -0.09478194981909983),
(20, -0.09173392507039077),
(9, 0.08768898801254493),
(17, -0.07553994244536394),
(4, 0.07422905503397653)],
'name': '<NAME>'},
852: {'explanation': [(21, 0.1327882942965061),
(1, 0.1238236573086363),
(18, -0.10911712271717902),
(19, 0.09707191051320978),
(6, 0.08593672504338913)],
'name': '<NAME>'}},
{207: {'explanation': [(6, 0.14931728779865114),
(14, 0.14092073957103526),
(1, 0.11071480021464616),
(4, 0.10655287976934531),
(8, 0.08705404649152573)],
'name': '<NAME>'},
208: {'explanation': [(8, -0.12242580400886727),
(9, 0.12142729544158742),
(14, -0.1148252787068248),
(16, -0.09562322208795092),
(4, 0.09350160975513132)],
'name': '<NAME>'},
852: {'explanation': [(6, 0.04227675072263027),
(9, -0.03107924340879173),
(14, 0.028007115650713045),
(13, 0.02771190348545554),
(19, 0.02640441416071482)],
'name': '<NAME>'}},
{207: {'explanation': [(19, 0.14313680656283245),
(18, 0.12866508562342843),
(8, 0.11809779264185447),
(0, 0.11286255403442104),
(2, 0.11286255403442104)],
'name': '<NAME>'},
208: {'explanation': [(9, 0.2397917428082761),
(14, -0.19435572812170654),
(6, -0.1760894833446507),
(18, -0.12243333818399058),
(15, 0.10986343675377105)],
'name': '<NAME>'},
852: {'explanation': [(14, 0.15378038774613365),
(9, -0.14245940635481966),
(6, 0.10213601012183973),
(20, 0.1009180838986786),
(3, 0.09780065767815548)],
'name': '<NAME>'}},
{207: {'explanation': [(15, 0.06525850448807077),
(9, 0.06286791243851698),
(19, 0.055189970374185854),
(8, 0.05499197604401475),
(13, 0.04748220842936177)],
'name': '<NAME>'},
208: {'explanation': [(6, -0.31549091899770765),
(5, 0.1862302670824446),
(8, -0.17381478451341995),
(10, -0.17353516098662508),
(14, -0.13591542421754205)],
'name': '<NAME>'},
852: {'explanation': [(14, 0.2163853942943355),
(6, 0.17565046338282214),
(1, 0.12446193028474549),
(9, -0.11365789839746396),
(10, 0.09239073691962967)],
'name': '<NAME>'}},
{207: {'explanation': [(19, 0.1141207265647932),
(36, -0.08861425922625768),
(30, 0.07219209872026074),
(9, -0.07150939547859836),
(38, -0.06988288637544438)],
'name': '<NAME>'},
208: {'explanation': [(29, 0.10531073909547647),
(13, 0.08279642208039652),
(34, -0.0817952443980797),
(33, -0.08086848205765082),
(12, 0.08086848205765082)],
'name': '<NAME>'},
852: {'explanation': [(13, -0.1330452414595897),
(4, 0.09942366413042845),
(12, -0.09881995683190645),
(33, 0.09881995683190645),
(19, -0.09596925317560831)],
'name': '<NAME>'}},
{207: {'explanation': [(37, 0.08193926967758253),
(35, 0.06804043021426347),
(15, 0.06396269230810163),
(11, 0.062255657227065296),
(8, 0.05529200233091672)],
'name': '<NAME>'},
208: {'explanation': [(19, 0.05711957286614678),
(27, -0.050230108135410824),
(16, -0.04743034616549999),
(5, -0.046717346734255705),
(9, -0.04419100026638039)],
'name': '<NAME>'},
852: {'explanation': [(3, -0.08390967998497496),
(30, -0.07037680222442452),
(22, 0.07029819368543713),
(8, -0.06861396187180349),
(37, -0.06662511956402824)],
'name': '<NAME>'}},
{207: {'explanation': [(19, 0.048418845359024805),
(9, -0.0423869575883795),
(30, 0.04012650790044438),
(36, -0.03787242980067195),
(10, 0.036557999380695635)],
'name': '<NAME>'},
208: {'explanation': [(10, 0.12120686823129677),
(17, 0.10196564232230493),
(7, 0.09495133975425854),
(25, -0.0759657891182803),
(2, -0.07035244568286837)],
'name': '<NAME>'},
852: {'explanation': [(3, -0.0770578003457272),
(28, 0.0769372258280398),
(6, -0.06044725989272927),
(22, 0.05550155775286349),
(31, -0.05399028046597057)],
'name': '<NAME>'}},
{207: {'explanation': [(14, 0.05371383110181226),
(0, -0.04442539316084218),
(18, 0.042589475382826494),
(19, 0.04227647855354252),
(17, 0.041685661662754295)],
'name': '<NAME>'},
208: {'explanation': [(29, 0.14419601354489464),
(17, 0.11785174500536676),
(36, 0.1000501679652906),
(10, 0.09679790134851017),
(35, 0.08710376081189208)],
'name': '<NAME>'},
852: {'explanation': [(8, -0.02486237985832769),
(3, -0.022559886154747102),
(11, -0.021878686669239856),
(36, 0.021847953817988534),
(19, -0.018317598300716522)],
'name': '<NAME>'}},
{207: {'explanation': [(37, 0.08098729255605368),
(35, 0.06639102704982619),
(15, 0.06033721190370432),
(34, 0.05826267856117829),
(28, 0.05549505160798173)],
'name': '<NAME>'},
208: {'explanation': [(17, 0.13839012042250542),
(10, 0.11312187488346881),
(7, 0.10729071207480922),
(25, -0.09529127965797404),
(11, -0.09279834572979286)],
'name': '<NAME>'},
852: {'explanation': [(3, -0.028385651836694076),
(22, 0.023364702783498722),
(8, -0.023097812578270233),
(30, -0.022931236620034406),
(37, -0.022040170736525342)],
'name': '<NAME>'}}
]
EXP_TAB = {
'setosa&0&0': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&1': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&2': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&3': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&4': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&5': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&6': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&7': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&8': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&9': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&10': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&11': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&12': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&13': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&14': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&15': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&16': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&17': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&18': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&19': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&20': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&21': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&22': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&23': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&24': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&25': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&26': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&27': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&28': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&29': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&30': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&31': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&32': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&33': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&34': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&35': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&36': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&37': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&38': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&39': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&40': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&41': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&42': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&43': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&44': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&45': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&46': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&47': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&48': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&49': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&50': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&51': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&52': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&53': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&54': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&55': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&56': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&57': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&58': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&59': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&60': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&61': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&62': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&63': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&64': np.array([0.3094460464703627, 0.11400643817329122]),
'setosa&0&65': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&66': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&67': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&68': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&69': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&70': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&71': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&72': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&73': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&74': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&75': np.array([0.0, 0.95124502153736]),
'setosa&0&76': np.array([0.0, 0.9708703761803881]),
'setosa&0&77': np.array([0.0, 0.5659706098422994]),
'setosa&0&78': np.array([0.0, 0.3962828716108186]),
'setosa&0&79': np.array([0.0, 0.2538069363248767]),
'setosa&0&80': np.array([0.0, 0.95124502153736]),
'setosa&0&81': np.array([0.0, 0.95124502153736]),
'setosa&0&82': np.array([0.0, 0.95124502153736]),
'setosa&0&83': np.array([0.0, 0.95124502153736]),
'setosa&0&84': np.array([0.0, 0.9708703761803881]),
'setosa&0&85': np.array([0.0, 0.9708703761803881]),
'setosa&0&86': np.array([0.0, 0.9708703761803881]),
'setosa&0&87': np.array([0.0, 0.5659706098422994]),
'setosa&0&88': np.array([0.0, 0.5659706098422994]),
'setosa&0&89': np.array([0.0, 0.3962828716108186]),
'setosa&0&90': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&91': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&92': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&93': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&94': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&95': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&96': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&97': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&98': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&99': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&100': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&101': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&102': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&103': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&104': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&105': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&106': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&107': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&108': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&109': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&110': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&111': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&112': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&113': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&114': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&115': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&116': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&117': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&118': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&119': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&120': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&121': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&122': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&123': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&124': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&125': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&126': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&127': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&128': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&129': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&130': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&131': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&132': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&133': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&134': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&135': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&136': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&137': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&138': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&139': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&140': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&141': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&142': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&143': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&144': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&145': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&146': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&147': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&148': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&149': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&150': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&151': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&152': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&153': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&154': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&155': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&156': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&157': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&158': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&159': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&160': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&161': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&162': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&163': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&164': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&165': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&166': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&167': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&168': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&169': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&170': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&171': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&172': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&173': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&174': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&175': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&176': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&177': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&178': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&179': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&180': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&181': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&182': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&183': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&184': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&185': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&186': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&187': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&188': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&189': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&190': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&191': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&192': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&193': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&194': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&195': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&196': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&197': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&198': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&199': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&200': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&201': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&202': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&203': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&204': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&205': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&206': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&207': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&208': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&209': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&210': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&211': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&212': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&213': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&214': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&215': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&216': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&217': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&218': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&219': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&220': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&221': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&222': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&223': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&224': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&225': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&226': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&227': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&228': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&229': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&230': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&231': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&232': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&233': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&234': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&235': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&236': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&237': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&238': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&239': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&240': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&241': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&242': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&243': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&244': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&245': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&246': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&247': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&248': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&249': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&250': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&251': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&252': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&253': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&254': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&255': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&256': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&257': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&258': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&259': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&260': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&261': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&262': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&263': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&264': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&265': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&266': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&267': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&268': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&269': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&270': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&271': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&272': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&273': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&274': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&275': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&276': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&277': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&278': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&279': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&280': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&281': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&282': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&283': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&284': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&285': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&286': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&287': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&288': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&289': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&290': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&291': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&292': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&293': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&294': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&295': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&296': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&297': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&298': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&299': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&300': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&301': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&302': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&303': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&304': np.array([0.3094460464703627, 0.11400643817329122]),
'setosa&0&305': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&306': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&307': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&308': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&309': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&310': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&311': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&312': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&313': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&314': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&1&0': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&1': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&2': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&3': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&4': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&5': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&6': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&7': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&8': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&9': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&10': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&11': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&12': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&13': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&14': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&15': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&16': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&17': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&18': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&19': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&20': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&21': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&22': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&23': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&24': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&25': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&26': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&27': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&28': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&29': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&30': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&31': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&32': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&33': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&34': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&35': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&36': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&37': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&38': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&39': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&40': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&41': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&42': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&43': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&44': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&45': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&46': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&47': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&48': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&49': np.array([0.4079256832347186, 0.038455640985860955]),
'setosa&1&50': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&51': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&52': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&53': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&54': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&55': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&56': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&57': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&58': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&59': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&60': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&61': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&62': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&63': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&1&64': np.array([0.3093950298647913, 0.1140298206733954]),
'setosa&1&65': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&66': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&67': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&68': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&69': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&70': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&71': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&72': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&73': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&74': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&1&75': np.array([0.0, -0.4756207622944677]),
'setosa&1&76': np.array([0.0, -0.4854334805210761]),
'setosa&1&77': np.array([0.0, 0.16885577975809635]),
'setosa&1&78': np.array([0.0, 0.395805885538554]),
'setosa&1&79': np.array([0.0, 0.2538072707138344]),
'setosa&1&80': np.array([0.0, -0.4756207622944677]),
'setosa&1&81': np.array([0.0, -0.4756207622944677]),
'setosa&1&82': np.array([0.0, -0.4756207622944677]),
'setosa&1&83': np.array([0.0, -0.4756207622944677]),
'setosa&1&84': np.array([0.0, -0.4854334805210761]),
'setosa&1&85': np.array([0.0, -0.4854334805210761]),
'setosa&1&86': np.array([0.0, -0.4854334805210761]),
'setosa&1&87': np.array([0.0, 0.16885577975809635]),
'setosa&1&88': np.array([0.0, 0.16885577975809635]),
'setosa&1&89': np.array([0.0, 0.395805885538554]),
'setosa&1&90': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&91': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&92': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&93': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&94': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&95': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&96': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&97': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&98': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&99': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&100': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&101': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&102': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&103': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&104': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&105': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&106': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&107': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&108': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&109': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&110': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&111': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&112': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&113': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&114': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&115': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&116': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&117': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&118': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&119': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&120': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&121': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&122': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&123': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&124': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&125': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&126': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&127': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&128': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&129': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&130': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&131': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&132': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&133': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&134': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&135': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&136': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&137': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&138': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&139': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&140': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&141': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&142': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&143': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&144': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&145': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&146': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&147': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&148': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&149': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&150': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&151': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&152': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&153': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&154': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&155': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&156': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&157': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&158': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&159': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&160': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&161': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&162': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&163': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&164': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&165': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&166': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&167': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&168': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&169': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&170': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&171': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&172': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&173': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&174': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&175': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&176': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&177': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&178': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&179': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&180': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&181': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&182': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&183': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&184': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&185': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&186': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&187': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&188': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&189': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&190': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&191': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&192': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&193': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&194': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&195': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&196': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&197': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&198': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&199': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&200': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&201': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&202': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&203': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&204': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&205': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&206': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&207': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&208': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&209': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&210': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&211': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&212': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&213': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&214': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&215': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&216': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&217': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&218': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&219': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&220': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&221': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&222': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&223': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&224': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&225': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&226': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&227': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&228': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&229': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&230': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&231': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&232': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&233': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&234': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&235': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&236': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&237': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&238': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&239': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&240': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&241': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&242': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&243': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&244': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&245': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&246': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&247': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&248': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&249': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&250': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&251': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&252': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&253': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&254': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&255': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&256': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&257': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&258': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&259': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&260': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&261': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&262': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&263': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&264': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&265': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&266': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&267': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&268': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&269': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&270': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&271': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&272': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&273': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&274': np.array([0.4079256832347186, 0.038455640985860955]),
'setosa&1&275': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&276': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&277': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&278': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&279': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&280': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&281': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&282': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&283': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&284': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&285': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&286': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&287': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&288': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&289': np.array([0.4079256832347186, 0.038455640985860955]),
'setosa&1&290': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&291': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&292': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&293': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&294': np.array([0.80403091954169, -0.844515250413482]),
import flowws
from flowws import Argument as Arg
import freud
import numpy as np
import plato
import plato.draw.vispy as draw
import rowan
def circle_patterns(locations, radii, Npoints=128, z=0):
locations = np.array(locations)
thetas = np.linspace(0, 2*np.pi, Npoints, endpoint=False)
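    # NOTE: the original function body is truncated after this point. The lines
    # below are a hedged sketch of a typical completion -- one ring of Npoints
    # (x, y, z) vertices per (location, radius) pair -- and not the original code.
    radii = np.broadcast_to(np.atleast_1d(radii), (len(locations),))
    unit_ring = np.stack([np.cos(thetas), np.sin(thetas), np.zeros_like(thetas)], axis=-1)
    points = []
    for location, radius in zip(locations, radii):
        ring = radius * unit_ring
        ring[:, 0] += location[0]
        ring[:, 1] += location[1]
        ring[:, 2] = z
        points.append(ring)
    return np.concatenate(points, axis=0)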
#!/usr/bin/env python
# coding: utf-8
from IPython.display import clear_output
import numpy as np
import pandas as pd
import os
import glob
import logging
import sys
from pathlib import Path
sys.path.append(str(Path('.').absolute().parent))
from importlib import reload
from tqdm import tqdm
import pickle
from pygmo.core import hypervolume
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
import random
import torch
import torch.optim as optim
import torch.nn as nn
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset, random_split
from dataloader.fairness_datahandler import FairnessDataHandler
from dataloader.fairness_dataset import CustomDataset
from models.linear_regression import LinearRegression
from models.nn1 import NN1
from models.nn2 import NN2
from loss.losses import *
from metric.metrics import *
from trainer import Trainer
from single_objective_trainer import SingleObjectiveTrainer
from validator import Validator
from loss.loss_class import Loss
from public_experiments.pareto_utils import *
device = torch.device('cpu')
get_ipython().run_line_magic('matplotlib', 'inline')
X = np.load('data/adult/X.npy')
y = np.load('data/adult/y.npy')
X1 = torch.from_numpy(X).float().to(device)
y1 = torch.from_numpy(y).float().to(device)
input_dimension = X1.shape[1]
data = CustomDataset(X1, y1)
total_samples = X1.shape[0]
train_samples = 10000
val_samples = 5000
test_samples = int(total_samples - train_samples - val_samples)
train_data, val_data, test_data = random_split(data, [train_samples, val_samples, test_samples])
# set up logging
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger('adult')
logger.setLevel(logging.INFO)
# Multi-objective setup
zero_weight = (y[:,0] == 0).sum()
one_weight = (y[:,0] == 1).sum()
n = max(zero_weight, one_weight)
weights = torch.tensor([np.float(n)/zero_weight, np.float(n)/one_weight], dtype=torch.float, device=device)
save_to_path = 'saved_models/adult/'
input_dim = train_data.dataset.x.shape[1]
lr = 1e-2
def build_model_adult(fairness_notion='ddp'):
model = NN1(input_dimension=input_dim)
model.to(device)
model.apply(weights_init)
optimizer = optim.Adam(model.parameters(), lr=lr)
# Loss functions
performance_loss = BCELoss(name='adult_bce')
sex_loss_EOP = TPRLoss(name='adult_DPP_sex', reg_lambda=0.1, reg_type='tanh')
sex_loss_DDP = DPLoss(name='adult_DPP_sex', reg_lambda=0.1, reg_type='tanh')
if(fairness_notion == 'ddp'):
losses = [performance_loss, sex_loss_DDP]
elif(fairness_notion == 'deo'):
losses = [performance_loss, sex_loss_EOP]
elif(fairness_notion == 'both'):
losses = [performance_loss, sex_loss_DDP, sex_loss_EOP]
return model, optimizer, losses
def adult_multi_fairness(fairness_notion='ddp'):
# metrics
accuracy = Accuracy(name='accuracy')
ddp = DPDiff(name='DDP')
deo = TPRDiff(name='DEO')
if(fairness_notion == 'ddp'):
validation_metrics = [accuracy, ddp]
elif(fairness_notion == 'deo'):
validation_metrics = [accuracy, deo]
elif(fairness_notion == 'both'):
validation_metrics = [accuracy, ddp, deo]
scores_mo_zenith_adult = []
losses_mo_zenith_adult = []
matches_zenith = []
matches_fair = []
for i in tqdm(range(10)):
train_data, val_data, test_data = random_split(data, [train_samples, val_samples, test_samples])
data_handler = FairnessDataHandler('adult', train_data, val_data, test_data)
model, optimizer, losses = build_model_adult(fairness_notion)
files = glob.glob(save_to_path + '*')
for f in files:
os.remove(f)
trainer_adult = Trainer(data_handler, model, losses, validation_metrics, save_to_path, params='yaml_files/trainer_params_adult.yaml', optimizer=optimizer)
trainer_adult.train()
scores_val = to_np(trainer_adult.pareto_manager._pareto_front)
chosen_score_zenith, idx_zenith = get_solution(scores_val)
####### closest to zenith point #############
model_val = NN1(input_dimension=input_dim)
model_val.to(device)
match_zenith = '_'.join(['%.4f']*len(chosen_score_zenith)) % tuple(chosen_score_zenith)
files = glob.glob(save_to_path + '*')
for f in files:
if(match_zenith in f):
model_val.load_state_dict(torch.load(f))
continue
test_len = data_handler.get_testdata_len()
test_loader = data_handler.get_test_dataloader(drop_last=False, batch_size=test_len)
test_validator = Validator(model_val, test_loader, validation_metrics, losses)
test_metrics, test_losses = test_validator.evaluate()
scores_mo_zenith_adult.append(test_metrics)
losses_mo_zenith_adult.append(test_losses)
scores_mo_adult = np.array(scores_mo_zenith_adult)
clear_output(wait=True)
if(fairness_notion == 'both'):
print('The error has mean ', 1-np.mean(scores_mo_adult[:,[0]]), ' and standard deviation ', np.std(scores_mo_adult[:,[0]]))
print('The ddp has mean ', 1-np.mean(scores_mo_adult[:,[1]]), ' and standard deviation ', np.std(scores_mo_adult[:,[1]]))
print('The deo has mean ', 1-np.mean(scores_mo_adult[:,[2]]), ' and standard deviation ', np.std(scores_mo_adult[:,[2]]))
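    else:
        # Hedged sketch (the original file is truncated here): for the single-notion
        # runs only two metric columns exist (accuracy plus one fairness metric), so
        # only those are reported. The exact wording of the original prints is unknown.
        print('The error has mean ', 1 - np.mean(scores_mo_adult[:, [0]]), ' and standard deviation ', np.std(scores_mo_adult[:, [0]]))
        print('The fairness gap has mean ', 1 - np.mean(scores_mo_adult[:, [1]]), ' and standard deviation ', np.std(scores_mo_adult[:, [1]]))
    return scores_mo_adult, losses_mo_zenith_adult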
### Importing dependencies
# Gaussian sampler
from sampler import samplerz
# Gaussian sampler with repetitions
from sampler_rep import samplerz_rep
# Imports Falcon signature scheme
from falcon import falcon
# Estimators for moments
from scipy.stats import skew, kurtosis, moment
# Statistical (normality) tests
from scipy.stats import chisquare
# Distributions
from scipy.stats import chi2, norm
# Numpy stuff
from numpy import cov, set_printoptions, diag, array, mean
from numpy.linalg import matrix_rank, inv, eig, eigh
import matplotlib.pyplot as plt
# Math functions
from math import ceil, sqrt, exp, log
# Data management
from copy import deepcopy
import re
import pandas
# Uniformity
from random import uniform
# For HZ multivariate test, used in scipy.spatial.distance.mahalanobis
from numpy import floor
from numpy import tile
# qqplot
import scipy.stats as stats
from numpy import transpose, sort
# <NAME>
from numpy import corrcoef, power
from numpy import log as nplog
from numpy import sqrt as npsqrt
# mvn plot test
from numpy import histogram
# rejection testing
from collections import Counter, defaultdict
from numpy import arange
# import csv files
import csv
# For debugging purposes
import sys
import time
if sys.version_info >= (3, 4):
from importlib import reload # Python 3.4+ only.
# Tailcut rate
tau = 14
# Minimal size of a bucket for the chi-squared test (must be >= 5)
chi2_bucket = 10
# Minimal p-value
pmin = 0.001
# Print options
set_printoptions(precision=4)
def gaussian(x, mu, sigma):
"""
Gaussian function of center mu and "standard deviation" sigma.
"""
return exp(- ((x - mu) ** 2) / (2 * (sigma ** 2)))
def make_gaussian_pdt(mu, sigma):
"""
Make the probability distribution table (PDT) of a discrete Gaussian.
The output is a dictionary.
"""
# The distribution is restricted to [-zmax, zmax).
zmax = ceil(tau * sigma)
pdt = dict()
for z in range(int(floor(mu)) - zmax, int(ceil(mu)) + zmax):
pdt[z] = gaussian(z, mu, sigma)
gauss_sum = sum(pdt.values())
for z in pdt:
pdt[z] /= gauss_sum
return pdt
class UnivariateSamples:
"""
Class for computing statistics on univariate Gaussian samples.
"""
def __init__(self, mu, sigma, list_samples):
"""
Input:
- the expected center mu of a discrete Gaussian over Z
- the expected standard deviation sigma of a discrete Gaussian over Z
- a list of samples defining an empiric distribution
Output:
- the means of the expected and empiric distributions
- the standard deviations of the expected and empiric distributions
- the skewness of the expected and empiric distributions
- the kurtosis of the expected and empiric distributions
- a chi-square test between the two distributions
"""
zmax = ceil(tau * sigma)
# Expected center standard variation.
self.exp_mu = mu
self.exp_sigma = sigma
# Number of samples
self.nsamples = len(list_samples)
self.histogram = dict()
self.outlier = 0
# Initialize histogram
for z in range(int(floor(mu)) - zmax, int(ceil(mu)) + zmax):
self.histogram[z] = 0
for z in list_samples:
# Detect and count outliers (samples not in [-zmax, zmax))
if z not in self.histogram:
self.outlier += 1
# Fill histogram according to the samples
else:
self.histogram[z] += 1
# Empiric mean, variance, skewness, kurtosis and standard deviation
self.mean = sum(list_samples) / self.nsamples
self.variance = moment(list_samples, 2)
self.skewness = skew(list_samples)
self.kurtosis = kurtosis(list_samples)
self.stdev = sqrt(self.variance)
# Chi-square statistic and p-value
self.chi2_stat, self.chi2_pvalue = self.chisquare()
# Final assessment: the dataset is valid if:
# - the chi-square p-value is higher than pmin
# - there is no outlier
self.is_valid = True
self.is_valid &= (self.chi2_pvalue > pmin)
self.is_valid &= (self.outlier == 0)
def __repr__(self):
"""
Print the sample statistics in a readable form.
"""
rep = "\n"
rep += "Testing a Gaussian sampler with center = {c} and sigma = {s}\n".format(c=self.exp_mu, s=self.exp_sigma)
rep += "Number of samples: {nsamples}\n\n".format(nsamples=self.nsamples)
rep += "Moments | Expected Empiric\n"
rep += "---------+----------------------\n"
rep += "Mean: | {exp:.5f} {emp:.5f}\n".format(exp=self.exp_mu, emp=self.mean)
rep += "St. dev. | {exp:.5f} {emp:.5f}\n".format(exp=self.exp_sigma, emp=self.stdev)
rep += "Skewness | {exp:.5f} {emp:.5f}\n".format(exp=0, emp=self.skewness)
rep += "Kurtosis | {exp:.5f} {emp:.5f}\n".format(exp=0, emp=self.kurtosis)
rep += "\n"
rep += "Chi-2 statistic: {stat}\n".format(stat=self.chi2_stat)
rep += "Chi-2 p-value: {pval} (should be > {p})\n".format(pval=self.chi2_pvalue, p=pmin)
rep += "\n"
rep += "How many outliers? {o}".format(o=self.outlier)
rep += "\n\n"
rep += "Is the sample valid? {i}".format(i=self.is_valid)
return rep
def chisquare(self):
"""
Run a chi-square test to compare the expected and empiric distributions
"""
# We construct two histograms:
# - the expected one (exp_histogram)
# - the empirical one (histogram)
histogram = deepcopy(self.histogram)
# The chi-square test require buckets to have enough elements,
# so we aggregate samples in the left and right tails in two buckets
exp_histogram = make_gaussian_pdt(self.exp_mu, self.exp_sigma)
obs = list(histogram.values())
exp = list(exp_histogram.values())
z = 0
while(1):
if (z >= len(exp) - 1):
break
while (z < len(exp) - 1) and (exp[z] < chi2_bucket / self.nsamples):
obs[z + 1] += obs[z]
exp[z + 1] += exp[z]
obs.pop(z)
exp.pop(z)
z += 1
obs[-2] += obs[-1]
exp[-2] += exp[-1]
obs.pop(-1)
exp.pop(-1)
exp = [round(prob * self.nsamples) for prob in exp]
diff = self.nsamples - sum(exp_histogram.values())
exp_histogram[int(round(self.exp_mu))] += diff
res = chisquare(obs, f_exp=exp)
return res
class MultivariateSamples:
"""
Class for computing statistics on multivariate Gaussian samples
"""
def __init__(self, sigma, list_samples):
"""
Input:
- sigma: an expected standard deviation
- list_samples: a list of (expected) multivariate samples
Output:
- univariates[]: a list of UnivariateSamples objects (one / coordinate)
- covariance: an empiric covariance matrix
- DH, AS, PO, PA: statistics and p-values for the Doornik-Hansen test
- dc_pvalue: a p-value for our custom covariance-based test
"""
# Parse the signatures and store them
self.nsamples = len(list_samples)
self.dim = len(list_samples[0])
self.data = pandas.DataFrame(list_samples)
# Expected center and standard deviation
self.exp_mu = 0
self.exp_si = sigma
# Testing sphericity
# For each coordinate, perform an univariate analysis
self.univariates = [None] * self.dim
for i in range(self.dim):
self.univariates[i] = UnivariateSamples(0, sigma, self.data[i])
self.nb_gaussian_coord = sum((self.univariates[i].chi2_pvalue > pmin) for i in range(self.dim))
# Estimate the (normalized) covariance matrix
self.covariance = cov(self.data.transpose()) / (self.exp_si ** 2)
self.DH, self.AS, self.PO, self.PA = doornik_hansen(self.data)
self.dc_pvalue = diagcov(self.covariance, self.nsamples)
def __repr__(self):
"""
Print the sample statistics in a readable form.
"""
rep = "\n"
rep += "Testing a centered multivariate Gaussian of dimension = {dim} and sigma = {s:.3f}\n".format(dim=self.dim, s=self.exp_si)
rep += "Number of samples: {nsamples}\n".format(nsamples=self.nsamples)
rep += "\n"
rep += "The test checks that the data corresponds to a multivariate Gaussian, by doing the following:\n"
rep += "1 - Print the covariance matrix (visual check). One can also plot\n"
rep += " the covariance matrix by using self.show_covariance()).\n"
rep += "2 - Perform the Doornik-Hansen test of multivariate normality.\n"
rep += " The p-value obtained should be > {p}\n".format(p=pmin)
rep += "3 - Perform a custom test called covariance diagonals test.\n"
rep += "4 - Run a test of univariate normality on each coordinate\n"
rep += "\n"
rep += "1 - Covariance matrix ({dim} x {dim}):\n{cov}\n".format(dim=self.dim, cov=self.covariance)
rep += "\n"
if (self.nsamples < 4 * self.dim):
rep += "Warning: it is advised to have at least 8 times more samples than the dimension n.\n"
rep += "2 - P-value of Doornik-Hansen test: {p:.4f}\n".format(p=self.PO)
rep += "\n"
rep += "3 - P-value of covariance diagonals test: {p:.4f}\n".format(p=self.dc_pvalue)
rep += "\n"
rep += "4 - Gaussian coordinates (w/ st. dev. = sigma)? {k} out of {dim}\n".format(k=self.nb_gaussian_coord, dim=self.dim)
return rep
def show_covariance(self):
"""
Visual representation of the covariance matrix
"""
plt.imshow(self.covariance, interpolation='nearest')
plt.show()
def mardia(self):
"""
Mardia's test of multivariate normality.
The test computes estimators:
- A for the "generalized skewness"
- B for the "generalized kurtosis"
If the data is multivariate normal, then:
- A should follow a chi-2 distribution
- B should follow a normal distribution
Warning: for high dimensions the function converges very slowly,
requires many samples, and uses a lot of time and memory.
"""
if (self.nsamples < 500):
print("At least 500 samples are recommended for Mardia's test")
nsamp = self.nsamples
dim = self.dim
means = [list(self.data.mean())] * nsamp
# cdata centers the data around its mean
cdata = (self.data - pandas.DataFrame(means))
# S estimates the covariance matrix
S = sum(cdata[i:i + 1].transpose().dot(cdata[i:i + 1]) for i in range(nsamp))
S /= nsamp # S has size dim * dim
A0 = inv(S) # A0 has size dim * dim
A1 = A0.dot(cdata.transpose()) # A1 has size dim * nsamp
# Initializing A and B
A = 0
B = 0
# Computing the sums in A and B
for i in range(nsamp):
row = cdata[i:i + 1]
a = list(row.dot(A1)[0:1].values[0])
A += sum(elt ** 3 for elt in a)
B += a[i] ** 2
# Normalization of A and B
A /= (6 * nsamp)
B /= nsamp
B -= dim * (dim + 2)
B *= sqrt(nsamp / (8 * dim * (dim + 2)))
# A should follow a chi-2 distribution w/ chi_df degrees of freedom
# B should follow a normal distribution
chi_df = dim * (dim + 1) * (dim + 2) / 6
pval_A = 1 - chi2.cdf(A, chi_df)
pval_B = 1 - norm.cdf(B)
A = A
B = B
return (A, B, pval_A, pval_B)
def doornik_hansen(data):
"""
Perform the Doornik-Hansen test
(https://doi.org/10.1111/j.1468-0084.2008.00537.x)
This computes and transforms multivariate variants of the skewness
and kurtosis, then computes a chi-square statistic on the results.
"""
data = pandas.DataFrame(data)
data = deepcopy(data)
n = len(data)
p = len(data.columns)
# R is the correlation matrix, a scaling of the covariance matrix
# R has dimensions dim * dim
R = corrcoef(data.transpose())
L, V = eigh(R)
for i in range(p):
if(L[i] <= 1e-12):
L[i] = 0
if(L[i] > 1e-12):
L[i] = 1 / sqrt(L[i])
L = diag(L)
if(matrix_rank(R) < p):
V = pandas.DataFrame(V)
G = V.loc[:, (L != 0).any(axis=0)]
data = data.dot(G)
ppre = p
p = data.size / len(data)
raise ValueError("NOTE:Due that some eigenvalue resulted zero, a new data matrix was created. Initial number of variables = ", ppre, ", were reduced to = ", p)
R = corrcoef(data.transpose())
L, V = eigh(R)
L = diag(L)
means = [list(data.mean())] * n
stddev = [list(data.std(ddof=0))] * n
Z = (data - pandas.DataFrame(means)) / pandas.DataFrame(stddev)
Zp = Z.dot(V)
Zpp = Zp.dot(L)
st = Zpp.dot(transpose(V))
# skew is the multivariate skewness (dimension dim)
# kurt is the multivariate kurtosis (dimension dim)
skew = mean(power(st, 3), axis=0)
kurt = mean(power(st, 4), axis=0)
# Transform the skewness into a standard normal z1
n2 = n * n
b = 3 * (n2 + 27 * n - 70) * (n + 1) * (n + 3)
b /= (n - 2) * (n + 5) * (n + 7) * (n + 9)
w2 = -1 + sqrt(2 * (b - 1))
d = 1 / sqrt(log(sqrt(w2)))
y = skew * sqrt((w2 - 1) * (n + 1) * (n + 3) / (12 * (n - 2)))
# Use numpy log/sqrt as math versions dont have array input
z1 = d * nplog(y + npsqrt(y * y + 1))
# Transform the kurtosis into a standard normal z2
d = (n - 3) * (n + 1) * (n2 + 15 * n - 4)
a = (n - 2) * (n + 5) * (n + 7) * (n2 + 27 * n - 70) / (6 * d)
c = (n - 7) * (n + 5) * (n + 7) * (n2 + 2 * n - 5) / (6 * d)
k = (n + 5) * (n + 7) * (n * n2 + 37 * n2 + 11 * n - 313) / (12 * d)
al = a + (skew ** 2) * c
chi = (kurt - 1 - (skew ** 2)) * k * 2
z2 = (((chi / (2 * al)) ** (1 / 3)) - 1 + 1 / (9 * al)) * npsqrt(9 * al)
kurt -= 3
# omnibus normality statistic
DH = z1.dot(z1.transpose()) + z2.dot(z2.transpose())
AS = n / 6 * skew.dot(skew.transpose()) + n / 24 * kurt.dot(kurt.transpose())
# degrees of freedom
v = 2 * p
# p-values
PO = 1 - chi2.cdf(DH, v)
PA = 1 - chi2.cdf(AS, v)
return DH, AS, PO, PA
def diagcov(cov_mat, nsamples):
"""
This test studies the population covariance matrix.
Suppose it is of this form:
____________
| | |
| 1 | 3 |
|_____|_____|
| | |
| | 2 |
|_____|_____|
The test will first compute sums of elements on diagonals of 1, 2 or 3,
and store them in the table diagsum of size 2 * dim:
- First (dim / 2) lines = means of each diag. of 1 above leading diag.
- Following (dim / 2) lines = means of each diag. of 2 above leading diag.
- Following (dim / 2) lines = means of each diag. of 3 above leading diag.
- Last (dim / 2) lines = means of each diag. of 3 below leading diag.
We are making the assumption that each cell of the covariance matrix
follows a normal distribution of variance 1 / n. Assuming independence
of each cell in a diagonal, each diagonal sum of k elements should
follow a normal distribution of variance k / n (hence of variance
1 after normalization by n / k).
We then compute the sum of the squares of all elements in diagnorm.
It is supposed to look like a chi-square distribution.
"""
dim = len(cov_mat)
n0 = dim // 2
diagsum = [0] * (2 * dim)
for i in range(1, n0):
diagsum[i] = sum(cov_mat[j][i + j] for j in range(n0 - i))
diagsum[i + n0] = sum(cov_mat[n0 + j][n0 + i + j] for j in range(n0 - i))
diagsum[i + 2 * n0] = sum(cov_mat[j][n0 + i + j] for j in range(n0 - i))
diagsum[i + 3 * n0] = sum(cov_mat[j][n0 - i + j] for j in range(n0 - i))
# Diagnorm contains the normalized sums, which should be normal
diagnorm = diagsum[:]
for i in range(1, n0):
nfactor = sqrt(nsamples / (n0 - i))
diagnorm[i] *= nfactor
diagnorm[i + n0] *= nfactor
diagnorm[i + 2 * n0] *= nfactor
diagnorm[i + 3 * n0] *= nfactor
# Each diagnorm[i + _ * n0] should be a random normal variable
chistat = sum(elt ** 2 for elt in diagnorm)
pvalue = 1 - chi2.cdf(chistat, df=4 * (n0 - 1))
return pvalue
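def _diagcov_sanity_check(dim=64, nsamples=2000):
    """
    Hedged example (not part of the original file): sanity-check diagcov() on a
    sample covariance matrix estimated from genuinely independent N(0, 1)
    coordinates. Since the population covariance is the identity and sigma = 1,
    the raw sample covariance can be passed directly; the returned p-value
    should exceed pmin the overwhelming majority of the time.
    """
    from numpy.random import standard_normal
    samples = standard_normal((nsamples, dim))
    cov_mat = cov(samples.transpose())
    return diagcov(cov_mat, nsamples)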
def test_pysampler(nb_mu=100, nb_sig=100, nb_samp=100):
"""
Test our Gaussian sampler on a bunch of samples.
"""
print("Testing the sampler over Z with:\n")
print("- {a} different centers\n".format(a=nb_mu))
print("- {a} different sigmas\n".format(a=nb_sig))
print("- {a} samples per center and sigma\n".format(a=nb_samp))
assert(nb_samp >= 10 * chi2_bucket)
q = 12289
sig_min = 1.3
sig_max = 1.8
nb_rej = 0
for i in range(nb_mu):
mu = uniform(0, q)
for j in range(nb_sig):
sigma = uniform(sig_min, sig_max)
list_samples = [samplerz(mu, sigma) for _ in range(nb_samp)]
v = UnivariateSamples(mu, sigma, list_samples)
if (v.chi2_pvalue < pmin):
nb_rej += 1
print("The test failed {k} times out of {t} (expected {e})".format(k=nb_rej, t=nb_mu * nb_sig, e=round(pmin * nb_mu * nb_sig)))
def parse_univariate_file(filename):
"""
Parse a file containing several univariate samples.
Input:
- the file name of a file containing 2 * k lines of this form:
- line 2 * i: "mu = xxx, sigma = yyy"
- line 2 * i + 1: zzz samples
Output:
- a Python list containing k elements:
- each element is of the form (mu, sigma, data):
- mu = expected center of a Gaussian
- sigma = expected standard deviation of a Gaussian
- data = samples from a distribution to test against the Gaussian
parametrized by (mu, sigma)
"""
# Initialize the output as the empty list
data_list = []
# Open the file
with open(filename) as f:
while True:
line1 = f.readline()
line2 = f.readline()
if not line2:
break # EOF
# Parsing mu and sigma
(_, mu, sigma, _) = re.split("mu = |, sigma = |\n", line1)
mu = float(mu)
sigma = float(sigma)
# Parsing the samples z
data = re.split(", |,\n", line2)
data = [int(elt) for elt in data[:-1]]
# Add the triple (mu, sigma, data) to the output
data_list += [(mu, sigma, data)]
return data_list
def parse_multivariate_file(filename):
"""
Parse a file containing several multivariate samples.
Input:
- the file name of a file containing k lines
- each line corresponds to a multivariate sample
- the samples are all assumed to be from the same distribution
Output:
- sigma: the expected standard deviation of the samples
- data: a Python list of length k, containing all the samples
"""
with open(filename) as f:
sigma = 0
data = []
while True:
# Parse each line
line = f.readline()
if not line:
break # EOF
sample = re.split(", |,\n", line)
sample = [int(elt) for elt in sample[:-1]]
data += [sample]
sigma += sum(elt ** 2 for elt in sample)
# sigma is the expected sigma based on the samples
sigma = sqrt(sigma / (len(data) * len(data[0])))
return (sigma, data)
def test_falcon():
"""
We test samples obtained directly from Falcon's reference implementation.
We test:
- univariate samples from the sampler over Z
- multivariate samples from the signature scheme
"""
# We first test the Gaussian sampler over Z, using the samples in:
# - testdata/sampler_fpnative
# - testdata/sampler_avx2
# - testdata/sampler_fpemu
# Each file should be formatted as to be parsable with parse_univariate_file()
for filename in ["sampler_fpnative", "sampler_avx2", "sampler_fpemu"]:
print("Testing data in file testdata/{file}:".format(file=filename))
data_list = parse_univariate_file("testdata/" + filename)
# n_mu_and_sig is the number of different couples (mu, sigma)
n_mu_and_sig = len(data_list)
n_invalid = 0
for elt in data_list:
(mu, sigma, data) = elt
u = UnivariateSamples(mu, sigma, data)
n_invalid += (u.is_valid is False)
print("- We tested {k} different (mu, sigma) list of samples".format(k=n_mu_and_sig))
print("- We found {k} invalid list of samples\n".format(k=n_invalid))
# Now we test the distribution of signatures using the multivariate test.
# Each element of this filelist is a text file containing a large number
# of multivariate samples. Each file should be formatted as to be
# parsable with parse_multivariate_file()
filelist = [
"falcon64_avx2",
"falcon128_avx2",
"falcon256_avx2",
"falcon512_avx2",
"falcon1024_avx2",
"falcon64_fpnative",
"falcon128_fpnative",
"falcon256_fpnative",
"falcon512_fpnative",
"falcon1024_fpnative",
"falcon64_fpemu_big",
"falcon128_fpemu_big",
"falcon256_fpemu_big",
"falcon512_fpemu_big",
"falcon1024_fpemu_big",
]
for filename in filelist:
print("\n\nTesting data in file testdata/{file}:".format(file=filename))
(sigma, data) = parse_multivariate_file("testdata/" + filename)
mv = MultivariateSamples(sigma, data)
print(mv)
return
def test_sig(n=128, nb_sig=1000, perturb=False, level=0):
"""
Test signatures output by a Python implementation of Falcon.
This test allows perturbing the FFT by setting the rightmost node
of the FFT tree (of the private key) to 0. One can check that, at
least for moderate levels (0 to 4), the test will end up detecting
(via diagcov) that the signatures output do not follow the correct
distribution.
Input:
- n: the degree of the ring
- nb_sig: number of signatures
- perturb: if set to 1, one node in the FFT tree is set to 0
- level: determines which node (the rightmost one at a given level)
is set to 0
"""
start = time.time()
# Generate a private key
sk = falcon.SecretKey(n)
# Perturb the FFT tree
if perturb is True:
# Check that the level is less than the FFT tree depth
assert(1 << level) < n
u, k = sk.T_fft, n
# Find the node
for _ in range(level):
u = u[2]
k >>= 1
# Zero-ize the node
u[0] = [0] * k
end = time.time()
print("Took {t:.2f} seconds to generate the private key.".format(t=end - start))
# Compute signatures
message = "0"
start = time.time()
list_signatures = [sk.sign(message, reject=False) for _ in range(nb_sig)]
# Strip away the nonces and concatenate the s_1's and s_2's
list_signatures = [sig[1][0] + sig[1][1] for sig in list_signatures]
end = time.time()
print("Took {t:.2f} seconds to generate the samples.".format(t=end - start))
# Perform the statistical test
start = time.time()
samples_data = MultivariateSamples(sk.sigma, list_signatures)
end = time.time()
print("Took {t:.2f} seconds to run a statistical test.".format(t=end - start))
return sk, samples_data
def test_rejind(mu, sigma):
"""
The input assumes a dataset with the number of rejections (a) for each output (b),
forming the data structure [(a, b)] * n, used to test for independence.
"""
# parameters to generate data
n = 10000
mu = 0
nb_mu = 100
sigma = 1.5
q = 12289
# assumed data input for testing:
# output given as a tuple (x,#reps)
data = [samplerz_rep(mu, sigma) for _ in range(n)]
counter = Counter(map(tuple,data))
values, rejects = zip(*data)
results = []
mu = 0
for i in range(nb_mu):
list_samples = [samplerz_rep(mu, sigma) for _ in range(n)]
counter = dict(Counter(map(tuple, list_samples)))
result = defaultdict(int)
for key in sorted(counter.keys()):
result[key[1]] += int(counter[key])
result = dict(result)
results.append(result)
mu += q / nb_mu
# sort data
df = pandas.DataFrame(results)
df = df.fillna(0)
df = df.sort_index(axis=1)
print(df)
# plot
plt.figure(figsize=(24, 5))
plt.pcolor(df)
plt.colorbar()
plt.yticks(arange(0, len(df.index), step=10), fontsize=17)
plt.xticks(arange(0.5, len(df.columns), 1), df.columns, fontsize=17)
plt.rcParams["axes.grid"] = False
plt.xlim((0, 9))
plt.xlabel('Number of Rejections', fontsize=21)
plt.ylabel('Dataset Number', fontsize=21)
plt.savefig('rejections.eps', format='eps', bbox_inches="tight", pad_inches=0)
plt.show()
def test_basesampler(mu, sigma):
"""
A set of visual tests, assuming you have failed some tests,
either for univariate data input or generated below.
"""
# generate data
n = 100000
mu = 0
sigma = 1.5
data = [samplerz(mu, sigma) for _ in range(n)]
# histogram
hist, bins = histogram(data, bins=abs(min(data)) + max(data))
x = bins[:-1]
y1 = hist
y2 = norm.pdf(x, mu, sigma) * n
plt.bar(x, y1, width=1.25, color='blue', edgecolor='none', label='Gauss Samples')
plt.plot(x, y2, '-r', label='Gauss Expected')
plt.xlabel('$x$')
plt.ylabel('pdf$(x)$')
plt.legend(loc='upper right')
plt.title("Gaussian Samples, Observed vs Expected", fontweight="bold", fontsize=12)
plt.savefig('histogram.eps', format='eps', bbox_inches="tight", pad_inches=0)
# qqplot
r2 = stats.linregress(sort(hist), sort(y2))[2] ** 2
plt.title('R-Squared = %0.20f' % r2, fontsize=9)
plt.suptitle("QQ plot for Univariate Normality of Gaussian Samples", fontweight="bold", fontsize=12)
plt.savefig('qqplot_test.eps', format='eps', bbox_inches="tight", pad_inches=0)
plt.show()
#######################
# Supplementary Stuff #
#######################
def estimate_sphericity(covar_matrix):
"""
Given a sample covariance matrix (in pandas DataFrame format), compute
the box-index and centered box-index of the matrix.
Both values should tend to 1 if the sample covariance matrix is the identity matrix
"""
covariance = array(covar_matrix)
dim = len(covariance)
# Compute an estimator of the population covariance matrix
# from the sample covariance matrix
cov_mean = array([row.mean() for row in covariance])
cov_meanmean = cov_mean.mean()
cov_centered = deepcopy(covariance)
for i in range(dim):
for j in range(dim):
cov_centered[i][j] += cov_meanmean - cov_mean[i] - cov_mean[j]
# Compute the (centered) Box index
box_num = sum(covariance[i][i] for i in range(dim)) ** 2
box_den = (dim - 1) * sum(covariance[i][j] ** 2 for i in range(dim) for j in range(dim))
box_index = box_num / box_den
cbox_num = sum(cov_centered[i][i] for i in range(dim)) ** 2
cbox_den = (dim - 1) * sum(cov_centered[i][j] ** 2 for i in range(dim) for j in range(dim))
cbox_index = cbox_num / cbox_den
# Compute eigenvalues (the eigenvalues of the centered and
# non-centered covariance matrices seem to be the same!)
eigen = eig(cov_centered)[0]
# Compute V
V = (sum(elt for elt in eigen) ** 2) / sum(elt ** 2 for elt in eigen)
statistic = ((dim - 1) ** 2) * (V - 1 / (dim - 1)) / (2 * dim)
df = (dim * (dim - 1) / 2) - 1
print("statistic = {s}".format(s=statistic))
print("deg of freedom = {d}".format(d=df))
return box_index, cbox_index
def qqplot(data):
"""
https://www.itl.nist.gov/div898/handbook/eda/section3/qqplot.htm
"""
data = pandas.DataFrame(data)
data = deepcopy(data)
S = cov(data.transpose(), bias=1)
n = len(data)
p = len(data.columns)
means = [list(data.mean())] * n
difT = data - pandas.DataFrame(means)
Dj = diag(difT.dot(inv(S)).dot(difT.transpose()))
Y = data.dot(inv(S)).dot(data.transpose())
Ytdiag = array(pandas.DataFrame(diag(Y.transpose())))
Djk = - 2 * Y.transpose()
Djk += tile(Ytdiag, (1, n)).transpose()
Djk += tile(Ytdiag, (1, n))
Djk_quick = []
for i in range(n):
Djk_quick += list(Djk.values[i])
chi2_random = chi2.rvs(p - 1, size=len(Dj))
chi2_random = sort(chi2_random)
r2 = stats.linregress(sort(Dj), chi2_random)[2] ** 2
import cv2
import numpy as np
import math
def _np_resize_to_range(image, min_size, max_size):
im_shape = image.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
im_scale = min(min_size / im_size_min, max_size / im_size_max)
image = cv2.resize(
image,
None,
None,
fx=im_scale,
fy=im_scale,
interpolation=cv2.INTER_LINEAR
)
return image, im_scale
def _np_mean_std_normalize(image, mean=(123.675, 116.28, 103.53), std=(58.395, 57.12, 57.375)):
"""
Args:
image: 3-D array of shape [height, width, channel]
mean: a list or tuple or ndarray
std: a list or tuple or ndarray
Returns:
"""
if not isinstance(mean, np.ndarray):
mean = np.array(mean, np.float32)
if not isinstance(std, np.ndarray):
std = np.array(std, np.float32)
shape = [1] * image.ndim
shape[-1] = -1
return (image - mean.reshape(shape)) / std.reshape(shape)
def _np_random_crop(image, crop_size):
"""
Args:
image: 3-D tensor of shape [h, w, c]
crop_size: a tuple of (crop_h, crop_w)
Returns:
"""
im_h, im_w, _ = image.shape
c_h, c_w = crop_size
pad_h = c_h - im_h
pad_w = c_w - im_w
if pad_h > 0 or pad_w > 0:
image = np.pad(image, [[0, max(pad_h, 0)], [0, max(pad_w, 0)], [0, 0]], mode='constant', constant_values=0)
im_h, im_w, _ = image.shape
y_lim = im_h - c_h + 1
x_lim = im_w - c_w + 1
ymin = int(np.random.randint(0, y_lim, 1))
xmin = int(np.random.randint(0, x_lim, 1))
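    # NOTE: the original function is truncated here; the return below is a hedged
    # completion that simply slices out the sampled crop window.
    return image[ymin:ymin + c_h, xmin:xmin + c_w, :]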
# coding: utf-8
# In[1]:
#get_ipython().run_line_magic('matplotlib', 'inline')
import numpy as np
import pandas as pd
import logging
import os
import sys
from scipy import interpolate, optimize, integrate
from time import gmtime, strftime
#import matplotlib.pyplot as plt
#from matplotlib import ticker, cm
#from mpl_toolkits.mplot3d import Axes3D
#from matplotlib.ticker import LinearLocator, FormatStrFormatter
# In[3]:
def read_tri_lodeAlt_eps_from_fem(fileName):
"""
read all sheets in xls file
input:
fileName
output:
listEta, listThetaBar, listEpsilon in sheets
"""
xlsx = pd.ExcelFile(fileName)
etaThetaEpsilon = []
for sheet in xlsx.sheet_names:
#print(sheet)
df = pd.read_excel(xlsx, sheet)
eta = df.loc[:, 'TRI'].values.reshape(-1, 1)
'''
LS-DYNA uses a different definition of the Lode parameter (27*J3 / (2*sigma^3)) than the MMC model,
so transform the LS-DYNA result to the MMC convention
'''
lodeAlt = df.loc[:, 'LODEALT'].values.reshape(-1, 1)
thetaBar = 1 - 2 / np.pi * np.arccos(lodeAlt)
eplison = df.loc[:, 'EPS'].values.reshape(-1, 1)
ind = np.where(eplison>0)
#print(sheet)
#print(ind)
ete = np.hstack([eta, thetaBar, eplison])
etaThetaEpsilon.append(ete[ind[0],:])
#logging.debug(etaThetaEpsilon)
etaThetaEpsilon = np.array(etaThetaEpsilon)
return etaThetaEpsilon
# In[4]:
"""
tests = read_tri_lodeAlt_eps_from_fem('FEM.xlsx')
fig = plt.figure()
ax = fig.gca(projection='3d')
plots = []
labels = []
ii = 0
for test in tests:
xx, yy, zz = test[:, 0], test[:, 1], test[:, 2]
pl = ax.plot(xx, yy, zz, label=str(ii))#, cmap=cm.coolwarm,linewidth=0, antialiased=False)
plots.append(pl)
labels.append(str(ii))
ii = ii + 1
ax.legend()#
ax.xaxis.set_major_locator(LinearLocator(5))
ax.yaxis.set_major_locator(LinearLocator(5))
ax.set_xlabel('Triaxiality')
ax.set_ylabel('Lode Parameter')
ax.set_zlabel('Effective plastic strain')
plt.show()
"""
# In[6]:
def accumulatedDamage(model, eta, thetaBar, epsilon, param):
"""
Accumulating Damage Model
input:
model: Damage mode
eta: eta list
thetaBar: theraBar list
epsilon: epsilon List
param: A, n, c1, c2
output:
damage Indicator
"""
#'epsilonMMC = np.zeros(eta.shape)'
#'for ii, (eta, thetaBar) in enumerate(zip(eta, thetaBar)):'
epsilonPlasticFailure = model(eta, thetaBar, param)
#'epsilonMMC[ii] = epsilonPlaticFailure'
damageIndicator = integrate.trapz(1/ epsilonPlasticFailure, epsilon)
#np.sum(epsilon / epsilonPlasticFailure)
#integrate.tr (1 / epsilonPlaticFailure, epsilon)
#print(epsilonPlasticFailure)
return damageIndicator
# In[8]:
def testResultsAcc(param, tests):
"""
Convert test results to damage indicators.
"""
indicators = np.zeros([tests.shape[0]])
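    # NOTE: the original file is truncated here. Hedged completion: accumulate a
    # damage indicator for every test path, following the column layout produced
    # by read_tri_lodeAlt_eps_from_fem (eta, thetaBar, epsilon).
    for ii, test in enumerate(tests):
        eta, thetaBar, epsilon = test[:, 0], test[:, 1], test[:, 2]
        # `mmc_model` is a hypothetical stand-in for the fracture model used by this
        # script (e.g. a modified Mohr-Coulomb failure strain epsilon_f(eta, thetaBar, param)).
        indicators[ii] = accumulatedDamage(mmc_model, eta, thetaBar, epsilon, param)
    return indicators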
from __future__ import division, print_function, absolute_import
import os
import traceback
import scipy.misc as misc
import matplotlib.pyplot as plt
import numpy as np
import glob
import pandas as pd
import random
from PIL import Image, ImageOps
def get_data_A1A4(data_path, split_load):
# Getting images (x data)
imgname_train_A1 = np.array([glob.glob(data_path+'/CVPPP2017_LCC_training/TrainingSplits/A1'+str(h)+'/*.png') for h in split_load[0]])
imgname_train_A4 = np.array([glob.glob(data_path+'/CVPPP2017_LCC_training/TrainingSplits/A4'+str(h)+'/*.png') for h in split_load[0]])
imgname_val_A1 = np.array([glob.glob(data_path+'/CVPPP2017_LCC_training/TrainingSplits/A1'+str(split_load[1])+'/*.png')])
imgname_val_A4 = np.array([glob.glob(data_path+'/CVPPP2017_LCC_training/TrainingSplits/A4'+str(split_load[1])+'/*.png')])
imgname_test_A1 = np.array([glob.glob(data_path+'/CVPPP2017_LCC_training/TrainingSplits/A1'+str(split_load[2])+'/*.png')])
imgname_test_A4 = np.array([glob.glob(data_path+'/CVPPP2017_LCC_training/TrainingSplits/A4'+str(split_load[2])+'/*.png')])
filelist_train_A1 = list(np.sort(imgname_train_A1.flat)[1::2])
filelist_train_A4 = list(np.sort(imgname_train_A4.flat)[1::2])
filelist_train_A1_fg = list(np.sort(imgname_train_A1.flat)[0::2])
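    # NOTE: the original function is truncated here. Hedged sketch of the remaining
    # file lists, assuming the same sorted/interleaved layout as above (even indices
    # = foreground masks, odd indices = RGB images). The image loading and label
    # parsing that presumably follows is not reconstructed.
    filelist_train_A4_fg = list(np.sort(imgname_train_A4.flat)[0::2])
    filelist_val_A1 = list(np.sort(imgname_val_A1.flat)[1::2])
    filelist_val_A4 = list(np.sort(imgname_val_A4.flat)[1::2])
    filelist_test_A1 = list(np.sort(imgname_test_A1.flat)[1::2])
    filelist_test_A4 = list(np.sort(imgname_test_A4.flat)[1::2])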
import numpy as np
import cv2
import pickle
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from common import *
threshold = {'x':(20, 100), 'y':(20, 100) ,'m':(30, 100) , 'd':(0.7, 1.3)}
def canny_test():
#fig = plt.figure()
# Read in the image and convert to grayscale
image = mpimg.imread(img_file)
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
if False:
# Define a kernel size for Gaussian smoothing / blurring
# Note: this step is optional as cv2.Canny() applies a 5x5 Gaussian internally
kernel_size = 7#15 #has to be odd
blur_gray = cv2.GaussianBlur(gray,(kernel_size, kernel_size), 0)
else:
blur_gray = gray
# Define parameters for Canny and run it
# NOTE: if you try running this code you might want to change these!
low_threshold = 50#50
high_threshold = 100#110
edges = cv2.Canny(blur_gray, low_threshold, high_threshold)
f0 = plt.figure(0)
plt.imshow(edges, cmap='Greys_r')
plt.title("canny")
f0.show()
# Display the image
#fig1 = plt.figure(1)
#plt.imshow(gray, cmap='Greys_r')
#fig2 = plt.figure(2)
#plt.imshow(blur_gray, cmap='Greys_r')
#fig1.show()
#plt.show()
if False:
fig3 = plt.figure(3)
for i in range(1,50,5):
low_threshold = i # 50
high_threshold = 100 # 110
edges = cv2.Canny(blur_gray, low_threshold, high_threshold)
plt.imshow(edges, cmap='Greys_r')
#fig2.show()
plt.show()
plt.show()
def sobel(gray, x, y, thresh_min = 20, thresh_max = 100):
sobel = cv2.Sobel(gray, cv2.CV_64F, x, y)
abs_sobel = np.absolute(sobel)
scaled_sobel = np.uint8(255 * abs_sobel / np.max(abs_sobel))
sbinary = np.zeros_like(scaled_sobel)
sbinary[(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1
return scaled_sobel, sbinary
def sobel_filter():
image = mpimg.imread(img_file)
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
scaled_sobel, sbinary = sobel(gray, 0, 1)
f0 = plt.figure(0)
plt.imshow(scaled_sobel, cmap='gray')
plt.title("scaled_sobel")
f0.show()
f1 =plt.figure(1)
plt.imshow(sbinary, cmap='gray')
plt.title("sbinary")
f1.show()
def abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=threshold['x']):
orientation ={'x':(1, 0), 'y':(0, 1)}
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
sobel = cv2.Sobel(gray, cv2.CV_64F,*orientation[orient], ksize=sobel_kernel)
abs_sobel = np.absolute(sobel)
scaled_sobel = np.uint8(255 * abs_sobel / np.max(abs_sobel))
grad_binary = np.zeros_like(scaled_sobel)
grad_binary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1
# Calculate directional gradient
# Apply threshold
return grad_binary
def mag_thresh(img, sobel_kernel=3, mag_thresh=threshold['m']):
# Apply the following steps to img
# 1) Convert to grayscale
# 2) Take the gradient in x and y separately
# 3) Calculate the magnitude
# 4) Scale to 8-bit (0 - 255) and convert to type = np.uint8
# 5) Create a binary mask where mag thresholds are met
# 6) Return this mask as your binary_output image
#binary_output = np.copy(img) # Remove this line
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0,ksize=sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1,ksize=sobel_kernel)
#mag, ang = cv2.cartToPolar(sobelx, sobely)
sobel_m = np.sqrt(np.square(sobelx)+np.square(sobely))
sobelm = np.uint8(255 * sobel_m / np.max(sobel_m))
binary_output = np.zeros_like(sobelm)
binary_output[(sobelm >= mag_thresh[0]) & (sobelm <= mag_thresh[1])] = 1
return binary_output
# Define a function that applies Sobel x and y,
# then computes the direction of the gradient
# and applies a threshold.
def dir_threshold(img, sobel_kernel=3, thresh=threshold['d']):
# Apply the following steps to img
# 1) Convert to grayscale
# 2) Take the gradient in x and y separately
# 3) Take the absolute value of the x and y gradients
# 4) Use np.arctan2(abs_sobely, abs_sobelx) to calculate the direction of the gradient
# 5) Create a binary mask where direction thresholds are met
# 6) Return this mask as your binary_output image
#binary_output = np.copy(img) # Remove this line
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
abs_sobelx = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0,ksize=sobel_kernel))
abs_sobely = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1,ksize=sobel_kernel))
sobel_slope = np.arctan2(abs_sobely, abs_sobelx)
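    # Steps 5) and 6) from the comment above (the original line was cut off here);
    # this mirrors mag_thresh() and thresholds the gradient direction.
    binary_output = np.zeros_like(sobel_slope)
    binary_output[(sobel_slope >= thresh[0]) & (sobel_slope <= thresh[1])] = 1
    return binary_output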
### Non dimensional parameters vs. metrics
from math import *
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from netCDF4 import Dataset
import numpy as np
import os
import pandas as pd
import pylab as pl
import scipy.io
import scipy as spy
import seaborn as sns
import sys
#lib_path = os.path.abspath('../../Building_canyon/BuildCanyon/PythonModulesMITgcm') # Add absolute path to my python scripts
lib_path = os.path.abspath('../BuildCanyon/PythonModulesMITgcm') # Add absolute path to my python scripts
sys.path.append(lib_path)
import ReadOutTools_MITgcm as rout
import MetricsPythonTools as mpt
#-----------------------------------------------------------------------------------------------------------------------------------------------------------------
def get_metrics(exp, run, TrNum, key):
    '''Get a column from a tracer-metrics pandas dataframe using the key name, run (01, 02, etc.) and the
    abbreviated experiment name (BAR, CNTDIFF, 3DDIFF, NOGMREDI). All input variables are strings. Returns the desired column from the dataframe.'''
df = pd.read_csv(('results/metricsDataFrames/%srun%sTr%s.csv' %(exp,run,TrNum)))
col = df[key]
return col
def get_water(exp, run, key):
    '''Get a column from a water-transport pandas dataframe using the key name, run (01, 02, etc.) and the
    abbreviated experiment name (BAR, CNTDIFF, 3DDIFF, NOGMREDI). All input variables are strings. Returns the desired column from the dataframe.'''
df = pd.read_csv(('results/metricsDataFrames/%srun%s.csv' %(exp,run)))
col = df[key]
return col
def get_areas(file, key):
    '''Get a column from an areas/volumes pandas dataframe using the CSV file path and the key (column) name.
    Both input variables are strings. Returns the desired column from the dataframe.'''
df = pd.read_csv(file)
col = df[key]
return col
#-----------------------------------------------------------------------------------------------------------------------------------------------------------------
sns.set()
sns.set_style('darkgrid')
sns.set_context('poster')
#Exp
CGrid = '/Users/Karina/Research/PhD/Tracers/TemporaryData/BARKLEY/run01/gridGlob.nc' # Smallest volume grid, closed bdy, no canyon.
#CGrid = '/ocean/kramosmu/MITgcm/TracerExperiments/CNTDIFF/run03/gridGlob.nc' # Smallest volume grid, closed bdy, no canyon.
CGridOut = Dataset(CGrid)
# General input
nx = 360
ny = 360
nz = 90
nt = 19 # t dimension size
numTr = 24 # number of tracers in total (CNT =20, 3D = 4)
rc = CGridOut.variables['RC']
xc = rout.getField(CGrid, 'XC') # x coords tracer cells
yc = rout.getField(CGrid, 'YC') # y coords tracer cells
drF = CGridOut.variables['drF'] # vertical distance between faces
drC = CGridOut.variables['drC'] # vertical distance between centers
labels = ['$K_v=10^{-7}$(out), $10^{-3}$(in), $K_i=1 m^2s^{-1}$','Kv=1E-7(out), 1E-4(in), Ki=1','Kv=1E-5(out), 1E-3(in), Ki=1',
'Kv=1E-5(out), 1E-4(in), Ki=1','Kv=1E-5, Ki=1','Kv=1E-4, Ki=1','Kv=1E-3, Ki=1','Kv=3.8E-5, Ki=10',
'Kv=2.8E-5, Ki=10','Kv=1.3E-5, Ki=10','Kv_noc=1E-5, Ki=1','Kv_noc=1E-4, Ki=1','Kv_noc=1E-3, Ki=1',
'Kv=1E-5, Ki=10','Kv=1E-4, Ki=10','Kv=1E-3, Ki=10','Kv=1E-5, Ki=0.1','Kv=1E-4, Ki=0.1',
'Kv=1E-3, Ki=0.1','Kv=3.8E-5, Ki=1','Kv=2.8E-5, Ki=1','Kv=1.3E-5, Ki=1','Kv=1E-4, Ki=1, Kt=Ks','Kv=1E-3, Ki=1,Kt=Ks']
wlabels = ['run04 - 3D','run05 - 3D','run06 - 3D','run07 - 3D','run02 - CNT','run03 - CNT','run04 - CNT',
'run07 - CNT','run09 - CNT','run10 - CNT','run11 - CNT','run12 - CNT']
times = np.arange(0,nt,1)
# LOAD AREAS
CS1A = get_areas('results/metricsDataFrames/Canyon_AreasVolumes.csv', 'CS1area' )
CS2A = get_areas('results/metricsDataFrames/Canyon_AreasVolumes.csv', 'CS2area' )
CS3A = get_areas('results/metricsDataFrames/Canyon_AreasVolumes.csv', 'CS3area' )
CS3sbA = get_areas('results/metricsDataFrames/Canyon_AreasVolumes.csv', 'CS3sbarea' )
CS4A = get_areas('results/metricsDataFrames/Canyon_AreasVolumes.csv', 'CS4area' )
CS5A = get_areas('results/metricsDataFrames/Canyon_AreasVolumes.csv', 'CS5area' )
AS1A = get_areas('results/metricsDataFrames/Canyon_AreasVolumes.csv', 'AS1area' )
AS2A = get_areas('results/metricsDataFrames/Canyon_AreasVolumes.csv', 'AS2area' )
LID1A = get_areas('results/metricsDataFrames/Canyon_AreasVolumes.csv', 'LID1area' )
LID2A = get_areas('results/metricsDataFrames/Canyon_AreasVolumes.csv', 'LID2area' )
VolHole = get_areas('results/metricsDataFrames/Canyon_AreasVolumes.csv', 'VolHole' )
VoleShwoHole = get_areas('results/metricsDataFrames/Canyon_AreasVolumes.csv', 'VolShNoHole' )
tracers_3D = ['04','05','06','07'] #run number because there's only 1 tr per run
tracers_CNT03 = ['1','2','3'] # tracer number , constant runs
tracers_CNT09 = ['1','2','3'] # tracer number , average diffusivity runs
tracers_CNT07 = ['1','2','3'] # tracer number , no canyon case
tracers_CNT02 = ['1','2','3'] # tracer number , Kiso=0.1
tracers_CNT04 = ['1','2','3'] # tracer number , Kiso=10
tracers_CNT10 = ['1','2','3'] # tracer number , Kiso=1
tracers_CNT11 = ['2'] # tracer number , Kiso=1, Ks=Kt=10^4
tracers_CNT12 = ['3'] # tracer number , Kiso=1, Ks=Kt=10^3
# LOAD TRACER ON SHELF DATA
TrOnSh = np.zeros((nt,numTr))
HWC = np.zeros((nt,numTr))
kk = 0
fields = ['TronShelfwHole', 'HCWonShelfwHole','TronHole','HCWonHole']
for ii in tracers_3D:
TrOnShwHole = get_metrics('3DDIFF_hole_', ii, '1', fields[0] )
TrOnHole = get_metrics('3DDIFF_hole_', ii, '1', fields[2] )
TrOnSh[:,kk] = TrOnHole
HWCsh = get_metrics('3DDIFF_hole_', ii, '1', fields[1] )
HWChole = get_metrics('3DDIFF_hole_', ii, '1', fields[3] )
HWC[:,kk] = HWChole
kk=kk+1
for ii in tracers_CNT03:
TrOnShwHole = get_metrics('CNTDIFF_hole_', '03',ii, fields[0] )
TrOnHole = get_metrics('CNTDIFF_hole_', '03', ii, fields[2] )
TrOnSh[:,kk] = TrOnHole
HWCsh = get_metrics('CNTDIFF_hole_', '03', ii,fields[1] )
HWChole = get_metrics('CNTDIFF_hole_', '03',ii, fields[3] )
HWC[:,kk] = HWChole
kk=kk+1
for ii in tracers_CNT09:
TrOnShwHole = get_metrics('CNTDIFF_hole_', '09',ii, fields[0] )
TrOnHole = get_metrics('CNTDIFF_hole_','09',ii, fields[2] )
TrOnSh[:,kk] = TrOnHole
HWCsh = get_metrics('CNTDIFF_hole_', '09',ii, fields[1] )
HWChole = get_metrics('CNTDIFF_hole_', '09',ii, fields[3] )
HWC[:,kk] = HWChole
kk=kk+1
for ii in tracers_CNT07:
TrSh = get_metrics('CNTDIFF_hole_', '07', ii, fields[0] )
TrHole= get_metrics('CNTDIFF_hole_', '07', ii, fields[2] )
HWCSh = get_metrics('CNTDIFF_hole_', '07', ii, fields[1] )
HWCHole = get_metrics('CNTDIFF_hole_', '07', ii, fields[3] )
TrOnSh[:,kk] = TrHole
HWC[:,kk] = HWCHole
kk=kk+1
for ii in tracers_CNT02:
TrOnShwHole = get_metrics('CNTDIFF_hole_', '02',ii, fields[0] )
TrOnHole = get_metrics('CNTDIFF_hole_', '02',ii, fields[2] )
TrOnSh[:,kk] = TrOnHole
HWCsh = get_metrics('CNTDIFF_hole_', '02',ii, fields[1] )
HWChole = get_metrics('CNTDIFF_hole_', '02', ii,fields[3] )
HWC[:,kk] = HWChole
kk=kk+1
for ii in tracers_CNT04:
TrOnShwHole = get_metrics('CNTDIFF_hole_', '04',ii, fields[0] )
TrOnHole = get_metrics('CNTDIFF_hole_', '04',ii, fields[2] )
TrOnSh[:,kk] = TrOnHole
HWCsh = get_metrics('CNTDIFF_hole_', '04',ii, fields[1] )
HWChole = get_metrics('CNTDIFF_hole_', '04',ii, fields[3] )
HWC[:,kk] = HWChole
kk=kk+1
for ii in tracers_CNT10:
TrOnShwHole = get_metrics('CNTDIFF_hole_', '10',ii, fields[0] )
TrOnHole = get_metrics('CNTDIFF_hole_', '10',ii, fields[2] )
TrOnSh[:,kk] = TrOnHole
HWCsh = get_metrics('CNTDIFF_hole_', '10',ii, fields[1] )
HWChole = get_metrics('CNTDIFF_hole_', '10',ii, fields[3] )
HWC[:,kk] = HWChole
kk=kk+1
for ii in tracers_CNT11:
TrOnShwHole = get_metrics('CNTDIFF_hole_', '11',ii, fields[0] )
TrOnHole = get_metrics('CNTDIFF_hole_', '11',ii, fields[2] )
TrOnSh[:,kk] = TrOnHole
HWCsh = get_metrics('CNTDIFF_hole_', '11',ii, fields[1] )
HWChole = get_metrics('CNTDIFF_hole_', '11',ii, fields[3] )
HWC[:,kk] = HWChole
kk=kk+1
for ii in tracers_CNT12:
TrOnShwHole = get_metrics('CNTDIFF_hole_', '12',ii, fields[0] )
TrOnHole = get_metrics('CNTDIFF_hole_', '12',ii, fields[2] )
TrOnSh[:,kk] = TrOnHole
HWCsh = get_metrics('CNTDIFF_hole_', '12',ii, fields[1] )
HWChole = get_metrics('CNTDIFF_hole_', '12',ii, fields[3] )
HWC[:,kk] = HWChole
kk=kk+1
# LOAD TRANSPORTS
CS1 = np.zeros((nt-1,numTr))
CS2 = np.zeros((nt-1,numTr))
CS3 = np.zeros((nt-1,numTr))
CS4 = np.zeros((nt-1,numTr))
CS5 = np.zeros((nt-1,numTr))
CS3sb = np.zeros((nt-1,numTr))
AS1 = np.zeros((nt-1,numTr))
AS2 = np.zeros((nt-1,numTr))
LID1 = np.zeros((nt-1,numTr))
LID2 = np.zeros((nt-1,numTr))
CS1a = np.zeros((nt-1,numTr))
CS2a = np.zeros((nt-1,numTr))
CS3a = np.zeros((nt-1,numTr))
CS4a = np.zeros((nt-1,numTr))
CS5a = np.zeros((nt-1,numTr))
CS3sba = np.zeros((nt-1,numTr))
AS1a = np.zeros((nt-1,numTr))
AS2a = np.zeros((nt-1,numTr))
LID1a = np.zeros((nt-1,numTr))
LID2a = np.zeros((nt-1,numTr))
CS1d = np.zeros((nt-1,numTr))
CS2d = np.zeros((nt-1,numTr))
CS3d = np.zeros((nt-1,numTr))
CS4d = np.zeros((nt-1,numTr))
CS5d = np.zeros((nt-1,numTr))
CS3sbd = np.zeros((nt-1,numTr))
AS1d = np.zeros((nt-1,numTr))
AS2d = np.zeros((nt-1,numTr))
LID1d = np.zeros((nt-1,numTr))
LID2d = np.zeros((nt-1,numTr))
kk = 0
fields = ['CS1','CS2','CS3','CS3sb','CS4','CS5','AS1' ,'AS2','LID1' ,'LID2']
fieldsDiff = ['CS1','CS2','CS3','CS3sb','CS4','CS5','AS1' ,'AS2','LID1' ,'LID2','LID1i' ,'LID2i']
for ii in tracers_3D:
CS1a[:,kk] = get_metrics('3DDIFF_CS_ADVFLUX_', ii, '1', fields[0] )
CS2a[:,kk] = get_metrics('3DDIFF_CS_ADVFLUX_', ii, '1', fields[1] )
CS3a[:,kk] = get_metrics('3DDIFF_CS_ADVFLUX_', ii, '1', fields[2] )
CS3sba[:,kk] = get_metrics('3DDIFF_CS_ADVFLUX_', ii, '1', fields[3] )
CS4a[:,kk] = get_metrics('3DDIFF_CS_ADVFLUX_', ii, '1', fields[4] )
CS5a[:,kk] = get_metrics('3DDIFF_CS_ADVFLUX_', ii, '1', fields[5] )
AS1a[:,kk] = get_metrics('3DDIFF_CS_ADVFLUX_', ii, '1', fields[6] )
AS2a[:,kk] = get_metrics('3DDIFF_CS_ADVFLUX_', ii, '1', fields[7] )
LID1a[:,kk] = get_metrics('3DDIFF_CS_ADVFLUX_', ii, '1', fields[8] )
LID2a[:,kk] = get_metrics('3DDIFF_CS_ADVFLUX_', ii, '1', fields[9] )
CS1d[:,kk] = get_metrics('3DDIFF_CS_DIFFFLUX_', ii, '1', fieldsDiff[0] )
CS2d[:,kk] = get_metrics('3DDIFF_CS_DIFFFLUX_', ii, '1', fieldsDiff[1] )
CS3d[:,kk] = get_metrics('3DDIFF_CS_DIFFFLUX_', ii, '1', fieldsDiff[2] )
CS3sbd[:,kk] = get_metrics('3DDIFF_CS_DIFFFLUX_', ii, '1', fieldsDiff[3] )
CS4d[:,kk] = get_metrics('3DDIFF_CS_DIFFFLUX_', ii, '1', fieldsDiff[4] )
CS5d[:,kk] = get_metrics('3DDIFF_CS_DIFFFLUX_', ii, '1', fieldsDiff[5] )
AS1d[:,kk] = get_metrics('3DDIFF_CS_DIFFFLUX_', ii, '1', fieldsDiff[6] )
AS2d[:,kk] = get_metrics('3DDIFF_CS_DIFFFLUX_', ii, '1', fieldsDiff[7] )
LID1d[:,kk] = (get_metrics('3DDIFF_CS_DIFFFLUX_', ii, '1', fieldsDiff[8] )
+get_metrics('3DDIFF_CS_DIFFFLUX_', ii, '1', fieldsDiff[10] ))
LID2d[:,kk] = (get_metrics('3DDIFF_CS_DIFFFLUX_', ii, '1', fieldsDiff[9] )
+get_metrics('3DDIFF_CS_DIFFFLUX_', ii, '1', fieldsDiff[11] ))
kk=kk+1
for ii in tracers_CNT03:
CS1a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '03', ii, fields[0] )
CS2a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '03', ii, fields[1] )
CS3a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '03', ii, fields[2] )
CS3sba[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '03', ii, fields[3] )
CS4a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '03', ii, fields[4] )
CS5a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '03', ii, fields[5] )
AS1a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '03', ii, fields[6] )
AS2a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '03', ii, fields[7] )
LID1a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '03', ii, fields[8] )
LID2a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '03', ii, fields[9] )
CS1d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '03',ii, fieldsDiff[0] )
CS2d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '03', ii, fieldsDiff[1] )
CS3d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '03', ii, fieldsDiff[2] )
CS3sbd[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '03', ii, fieldsDiff[3] )
CS4d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '03', ii, fieldsDiff[4] )
CS5d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '03', ii, fieldsDiff[5] )
AS1d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '03', ii, fieldsDiff[6] )
AS2d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '03', ii, fieldsDiff[7] )
LID1d[:,kk] = (get_metrics('CNTDIFF_CS_DIFFFLUX_', '03', ii, fieldsDiff[8] )
+get_metrics('CNTDIFF_CS_DIFFFLUX_', '03', ii, fieldsDiff[10] ))
LID2d[:,kk] = (get_metrics('CNTDIFF_CS_DIFFFLUX_', '03', ii, fieldsDiff[9] )
+get_metrics('CNTDIFF_CS_DIFFFLUX_', '03', ii, fieldsDiff[11] ))
kk=kk+1
for ii in tracers_CNT09:
CS1a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '09', ii, fields[0] )
CS2a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '09', ii, fields[1] )
CS3a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '09', ii, fields[2] )
CS3sba[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '09', ii, fields[3] )
CS4a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '09', ii, fields[4] )
CS5a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '09', ii, fields[5] )
AS1a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '09', ii, fields[6] )
AS2a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '09', ii, fields[7] )
LID1a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '09', ii, fields[8] )
LID2a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '09', ii, fields[9] )
CS1d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '09',ii, fieldsDiff[0] )
CS2d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '09', ii, fieldsDiff[1] )
CS3d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '09', ii, fieldsDiff[2] )
CS3sbd[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '09', ii, fieldsDiff[3] )
CS4d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '09', ii, fieldsDiff[4] )
CS5d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '09', ii, fieldsDiff[5] )
AS1d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '09', ii, fieldsDiff[6] )
AS2d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '09', ii, fieldsDiff[7] )
LID1d[:,kk] = (get_metrics('CNTDIFF_CS_DIFFFLUX_', '09', ii, fieldsDiff[8] )
+get_metrics('CNTDIFF_CS_DIFFFLUX_', '09', ii, fieldsDiff[10] ))
LID2d[:,kk] = (get_metrics('CNTDIFF_CS_DIFFFLUX_', '09', ii, fieldsDiff[9] )
+get_metrics('CNTDIFF_CS_DIFFFLUX_', '09', ii, fieldsDiff[11] ))
kk=kk+1
for ii in tracers_CNT07:
CS1a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '07', ii, fields[0] )
CS2a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '07', ii, fields[1] )
CS3a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '07', ii, fields[2] )
CS3sba[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '07', ii, fields[3] )
CS4a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '07', ii, fields[4] )
CS5a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '07', ii, fields[5] )
AS1a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '07', ii, fields[6] )
AS2a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '07', ii, fields[7] )
LID1a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '07', ii, fields[8] )
LID2a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '07', ii, fields[9] )
CS1d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '07',ii, fieldsDiff[0] )
CS2d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '07', ii, fieldsDiff[1] )
CS3d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '07', ii, fieldsDiff[2] )
CS3sbd[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '07', ii, fieldsDiff[3] )
CS4d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '07', ii, fieldsDiff[4] )
CS5d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '07', ii, fieldsDiff[5] )
AS1d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '07', ii, fieldsDiff[6] )
AS2d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '07', ii, fieldsDiff[7] )
LID1d[:,kk] = (get_metrics('CNTDIFF_CS_DIFFFLUX_', '07', ii, fieldsDiff[8] )
+get_metrics('CNTDIFF_CS_DIFFFLUX_', '07', ii, fieldsDiff[10] ))
LID2d[:,kk] = (get_metrics('CNTDIFF_CS_DIFFFLUX_', '07', ii, fieldsDiff[9] )
+get_metrics('CNTDIFF_CS_DIFFFLUX_', '07', ii, fieldsDiff[11] ))
kk=kk+1
for ii in tracers_CNT02:
CS1a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '02', ii, fields[0] )
CS2a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '02', ii, fields[1] )
CS3a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '02', ii, fields[2] )
CS3sba[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '02', ii, fields[3] )
CS4a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '02', ii, fields[4] )
CS5a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '02', ii, fields[5] )
AS1a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '02', ii, fields[6] )
AS2a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '02', ii, fields[7] )
LID1a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '02', ii, fields[8] )
LID2a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '02', ii, fields[9] )
CS1d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '02',ii, fieldsDiff[0] )
CS2d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '02', ii, fieldsDiff[1] )
CS3d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '02', ii, fieldsDiff[2] )
CS3sbd[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '02', ii, fieldsDiff[3] )
CS4d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '02', ii, fieldsDiff[4] )
CS5d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '02', ii, fieldsDiff[5] )
AS1d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '02', ii, fieldsDiff[6] )
AS2d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '02', ii, fieldsDiff[7] )
LID1d[:,kk] = (get_metrics('CNTDIFF_CS_DIFFFLUX_', '02', ii, fieldsDiff[8] )
+get_metrics('CNTDIFF_CS_DIFFFLUX_', '02', ii, fieldsDiff[10] ))
LID2d[:,kk] = (get_metrics('CNTDIFF_CS_DIFFFLUX_', '02', ii, fieldsDiff[9] )
+get_metrics('CNTDIFF_CS_DIFFFLUX_', '02', ii, fieldsDiff[11] ))
kk=kk+1
for ii in tracers_CNT04:
CS1a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '04', ii, fields[0] )
CS2a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '04', ii, fields[1] )
CS3a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '04', ii, fields[2] )
CS3sba[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '04', ii, fields[3] )
CS4a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '04', ii, fields[4] )
CS5a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '04', ii, fields[5] )
AS1a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '04', ii, fields[6] )
AS2a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '04', ii, fields[7] )
LID1a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '04', ii, fields[8] )
LID2a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '04', ii, fields[9] )
CS1d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '04',ii, fieldsDiff[0] )
CS2d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '04', ii, fieldsDiff[1] )
CS3d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '04', ii, fieldsDiff[2] )
CS3sbd[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '04', ii, fieldsDiff[3] )
CS4d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '04', ii, fieldsDiff[4] )
CS5d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '04', ii, fieldsDiff[5] )
AS1d[:,kk] =get_metrics('CNTDIFF_CS_DIFFFLUX_', '04', ii, fieldsDiff[6] )
AS2d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '04', ii, fieldsDiff[7] )
LID1d[:,kk] = (get_metrics('CNTDIFF_CS_DIFFFLUX_', '04', ii, fieldsDiff[8] )
+get_metrics('CNTDIFF_CS_DIFFFLUX_', '04', ii, fieldsDiff[10] ))
LID2d[:,kk] = (get_metrics('CNTDIFF_CS_DIFFFLUX_', '04', ii, fieldsDiff[9] )
+get_metrics('CNTDIFF_CS_DIFFFLUX_', '04', ii, fieldsDiff[11] ))
kk=kk+1
for ii in tracers_CNT10:
CS1a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '10', ii, fields[0] )
CS2a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '10', ii, fields[1] )
CS3a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '10', ii, fields[2] )
CS3sba[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '10', ii, fields[3] )
CS4a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '10', ii, fields[4] )
CS5a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '10', ii, fields[5] )
AS1a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '10', ii, fields[6] )
AS2a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '10', ii, fields[7] )
LID1a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '10', ii, fields[8] )
LID2a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '10', ii, fields[9] )
CS1d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '10',ii, fieldsDiff[0] )
CS2d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '10', ii, fieldsDiff[1] )
CS3d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '10', ii, fieldsDiff[2] )
CS3sbd[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '10', ii, fieldsDiff[3] )
CS4d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '10', ii, fieldsDiff[4] )
CS5d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '10', ii, fieldsDiff[5] )
AS1d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '10', ii, fieldsDiff[6] )
AS2d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '10', ii, fieldsDiff[7] )
LID1d[:,kk] = (get_metrics('CNTDIFF_CS_DIFFFLUX_', '10', ii, fieldsDiff[8] )
+get_metrics('CNTDIFF_CS_DIFFFLUX_', '10', ii, fieldsDiff[10] ))
LID2d[:,kk] = (get_metrics('CNTDIFF_CS_DIFFFLUX_', '10', ii, fieldsDiff[9] )
+get_metrics('CNTDIFF_CS_DIFFFLUX_', '10', ii, fieldsDiff[11] ))
kk=kk+1
for ii in tracers_CNT11:
CS1a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '11', ii, fields[0] )
CS2a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '11', ii, fields[1] )
CS3a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '11', ii, fields[2] )
CS3sba[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '11', ii, fields[3] )
CS4a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '11', ii, fields[4] )
CS5a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '11', ii, fields[5] )
AS1a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '11', ii, fields[6] )
AS2a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '11', ii, fields[7] )
LID1a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '11', ii, fields[8] )
LID2a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '11', ii, fields[9] )
CS1d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '11',ii, fieldsDiff[0] )
CS2d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '11', ii, fieldsDiff[1] )
CS3d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '11', ii, fieldsDiff[2] )
CS3sbd[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '11', ii, fieldsDiff[3] )
CS4d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '11', ii, fieldsDiff[4] )
CS5d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '11', ii, fieldsDiff[5] )
AS1d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '11', ii, fieldsDiff[6] )
AS2d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '11', ii, fieldsDiff[7] )
LID1d[:,kk] = (get_metrics('CNTDIFF_CS_DIFFFLUX_', '11', ii, fieldsDiff[8] )
+get_metrics('CNTDIFF_CS_DIFFFLUX_', '11', ii, fieldsDiff[10] ))
LID2d[:,kk] = (get_metrics('CNTDIFF_CS_DIFFFLUX_', '11', ii, fieldsDiff[9] )
+get_metrics('CNTDIFF_CS_DIFFFLUX_', '11', ii, fieldsDiff[11] ))
kk=kk+1
for ii in tracers_CNT12:
CS1a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '12', ii, fields[0] )
CS2a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '12', ii, fields[1] )
CS3a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '12', ii, fields[2] )
CS3sba[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '12', ii, fields[3] )
CS4a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '12', ii, fields[4] )
CS5a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '12', ii, fields[5] )
AS1a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '12', ii, fields[6] )
AS2a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '12', ii, fields[7] )
LID1a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '12', ii, fields[8] )
LID2a[:,kk] = get_metrics('CNTDIFF_CS_ADVFLUX_', '12', ii, fields[9] )
CS1d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '12',ii, fieldsDiff[0] )
CS2d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '12', ii, fieldsDiff[1] )
CS3d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '12', ii, fieldsDiff[2] )
CS3sbd[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '12', ii, fieldsDiff[3] )
CS4d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '12', ii, fieldsDiff[4] )
CS5d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '12', ii, fieldsDiff[5] )
AS1d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '12', ii, fieldsDiff[6] )
AS2d[:,kk] = get_metrics('CNTDIFF_CS_DIFFFLUX_', '12', ii, fieldsDiff[7] )
LID1d[:,kk] = (get_metrics('CNTDIFF_CS_DIFFFLUX_', '12', ii, fieldsDiff[8] )
+get_metrics('CNTDIFF_CS_DIFFFLUX_', '12', ii, fieldsDiff[10] ))
LID2d[:,kk] = (get_metrics('CNTDIFF_CS_DIFFFLUX_', '12', ii, fieldsDiff[9] )
+get_metrics('CNTDIFF_CS_DIFFFLUX_', '12', ii, fieldsDiff[11] ))
kk=kk+1
CS1 = CS1a +CS1d
CS2 = CS2a +CS2d
CS3 = CS3a +CS3d
CS4 = CS4a +CS4d
CS5 = CS5a+ CS5d
CS3sb = CS3sba +CS3sbd
AS1 = AS1a +AS1d
AS2 = AS2a +AS2d
LID1 = LID1a+ LID1d
LID2 = LID2a +LID2d
# LOAD WATER TRANSPORT
numWat = 12
water_3D = ['04','05','06','07'] #run number
water_CNT = ['02','03','04','07','09','10','11','12'] # run number , constant runs
wCS1 = np.zeros((nt-1,numWat))
wCS2 = np.zeros((nt-1,numWat))
wCS3 = np.zeros((nt-1,numWat))
wCS4 = np.zeros((nt-1,numWat))
wCS5 = np.zeros((nt-1,numWat))
wCS3sb = np.zeros((nt-1,numWat))
wAS1 = np.zeros((nt-1,numWat))
wAS2 = np.zeros((nt-1,numWat))
#
# Compare masks produced by BBs, axis-aligned ellipses and full ellipses
# with the GT maks. Also explores OBBs from segmentation masks
#
# IMPORTANT: ignores segmentation masks that formed by mopre than one connected component
#
#
import cv2, os, pickle
import numpy as np
from numpy import sqrt
import matplotlib.pyplot as plt
import scipy.stats as st
from pycocotools.coco import COCO
from pycocotools import mask as coco_mask
from glob import glob
from sklearn.datasets.samples_generator import make_blobs
from pdb import set_trace as pause
from math import atan2
##from obb import segmentations_to_rotation_bboxs_jung, rotation_bboxs_to_poly, from_poly_to_binary
#from obb_jeffri import segmentation_to_rotation_bbox, poly_to_binary
from matplotlib.patches import Ellipse
#import matplotlib.transforms as transforms
import pandas as pd
import seaborn as sns
def segmentation_to_rotation_bbox(ann):
"""
    Format a COCO annotation as an oriented bounding box (OBB)
    Input: annotations extracted with the COCO API, a list of dictionaries
    Output: bboxs -> array of shape (n_annotations, 5) with (x, y, w, h, angle)
            list_point -> list of (2, 4) matrices that denote the corners of each object
"""
    bbox = np.zeros(5)
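    # A hedged sketch of the remaining body (assumed, not the original implementation).
    # It presumes the annotation's segmentation has already been decoded into a binary
    # mask stored under the hypothetical key ann['mask']; the real code would decode
    # the COCO polygon/RLE first (e.g. with pycocotools).
    mask = ann['mask'].astype(np.uint8)
    contours = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]   # works for OpenCV 3.x and 4.x
    cnt = max(contours, key=cv2.contourArea)                   # largest connected component
    (cx, cy), (w, h), angle = cv2.minAreaRect(cnt)             # fitted rotated rectangle
    bbox[:] = [cx, cy, w, h, angle]
    corners = cv2.boxPoints(((cx, cy), (w, h), angle)).T       # (2, 4) corner matrix
    return bbox, [corners]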
import numpy as np
def add_vtarg_and_adv(seg, gamma, lam):
"""
Compute target value using TD(lambda) estimator, and advantage with GAE(lambda)
:param seg: (dict) the current segment of the trajectory (see traj_segment_generator return for more information)
:param gamma: (float) Discount factor
:param lam: (float) GAE factor
"""
# last element is only used for last vtarg, but we already zeroed it if last new = 1
    episode_starts = np.append(seg["episode_starts"], False)
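    # A hedged sketch of the rest of the function (assumed, not the original body):
    # the standard GAE(lambda) recursion, presuming `seg` also carries "rewards",
    # "vpred" and "nextvpred" as produced by the usual trajectory segment generator.
    vpred = np.append(seg["vpred"], seg["nextvpred"])
    rew_len = len(seg["rewards"])
    seg["adv"] = np.empty(rew_len, "float32")
    rewards = seg["rewards"]
    lastgaelam = 0.0
    for step in reversed(range(rew_len)):
        nonterminal = 1 - float(episode_starts[step + 1])
        delta = rewards[step] + gamma * vpred[step + 1] * nonterminal - vpred[step]
        seg["adv"][step] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam
    seg["tdlamret"] = seg["adv"] + seg["vpred"]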
import numpy as np
import scipy.stats.distributions as sc_dist
from itertools import compress
def aggarwal_limits(mu, alpha=0.68268949):
"""Get Poissonian limits for specified contour levels
Parameters
----------
    mu : array_like
The expected number of events (Poisson mean) in each observable bin.
Shape: [n_bins]
alpha : float or list of float, optional
The list of alpha values, which define the contour levels which will
be computed.
Returns
-------
array_like
The lower limits (minus -0.5) for each of the observable bins and
chosen contour value alpha.
Shape: [n_bins, n_alpha]
array_like
The upper limits (plus +0.5) for each of the observable bins and
chosen contour value alpha.
Shape: [n_bins, n_alpha]
"""
if isinstance(alpha, float):
alpha = [alpha]
mu_large = np.zeros((len(mu), len(alpha)))
alpha_large = np.zeros_like(mu_large)
for i, a_i in enumerate(alpha):
alpha_large[:, i] = a_i
mu_large[:, i] = mu
mu_large_flat = mu_large.reshape(np.prod(mu_large.shape))
alpha_large_flat = alpha_large.reshape(mu_large_flat.shape)
lower, upper = sc_dist.poisson.interval(alpha_large_flat, mu_large_flat)
lower[lower != 0] -= 0.5
upper += 0.5
return lower.reshape(mu_large.shape), upper.reshape(mu_large.shape)
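# Example (hypothetical numbers) of how `aggarwal_limits` is intended to be used:
#
#     mu = np.array([2.0, 10.0, 50.0])                       # expected counts per bin
#     lower, upper = aggarwal_limits(mu, alpha=[0.68268949, 0.95449974])
#     # lower.shape == upper.shape == (3, 2): one (lower, upper) band per bin
#     # and per contour level alpha.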
def aggarwal_limits_pdf(pdfs, ks, alpha=0.68268949):
"""Get limits for specified contour levels
In contrast to `aggarwal_limits` this function computes the limits based
on the evaluated and normalized likelihood as opposed to the theoretical
    limits from the Poisson distribution.
Parameters
----------
pdfs : list of list of float
The pdf values for each feature bin and for each value k.
The value k is the observed number of events in the Poisson Likelihood.
        The number of evaluated k values is different for each observable bin,
and it is chosen such that a certain coverage is obtained.
Shape: [n_bins, n_k_values] (note that n_k_values is not constant!)
ks : list of list of int
The corresponding k value for each of the evaluated pdf values `pdfs`.
Shape: [n_bins, n_k_values] (note that n_k_values is not constant!)
alpha : float or list of float, optional
The list of alpha values, which define the contour levels which will
be computed.
Returns
-------
array_like
The lower limits (minus -0.5) for each of the observable bins and
chosen contour value alpha.
Shape: [n_bins, n_alpha]
array_like
The upper limits (plus +0.5) for each of the observable bins and
chosen contour value alpha.
Shape: [n_bins, n_alpha]
"""
if isinstance(alpha, float):
alpha = [alpha]
lower = np.zeros((len(pdfs), len(alpha)))
upper = np.zeros((len(pdfs), len(alpha)))
for i, pdf in enumerate(pdfs):
if len(ks[i]) == 0:
continue
cdf = np.cumsum(pdf)
if cdf[-1] < 0.999:
print('Cdf only goes up to {}'.format(cdf[-1]))
lower[i, :] = np.nan
upper[i, :] = np.nan
continue
for j, alpha_j in enumerate(alpha):
q1 = (1.-alpha_j) / 2.
q2 = (1.+alpha_j) / 2.
lower_idx = np.searchsorted(cdf, q1)
upper_idx = np.searchsorted(cdf, q2)
lower[i, j] = ks[i][lower_idx]
upper[i, j] = ks[i][upper_idx]
lower[lower != 0] -= 0.5
upper += 0.5
return lower, upper
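# Example (hypothetical call) chaining this with `evaluate_normalized_likelihood`
# below, using a plain Poisson log-likelihood for each bin expectation in `mu`:
#
#     ks, pdfs = zip(*(evaluate_normalized_likelihood(
#         sc_dist.poisson.logpmf, 0.999, m, mu=m) for m in mu))
#     lower, upper = aggarwal_limits_pdf(pdfs, ks, alpha=[0.9])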
def evaluate_normalized_likelihood(llh_func, coverage,
first_guess, **llh_kwargs):
"""Compute normalized likelihood
This function evaluates the likelihood function `llh_func` iteratively over
possible values of k (observed number of events in Poissonian) until
the specified coverage is reached.
This can then be used to normalize the likelihood and to define the PDF
in observed values k and to compute the limits in k.
Parameters
----------
llh_func : callable
The likelihood function
coverage : float
The minimum coverage value to obtain. Max value is 1. The closer to
1, the more accurate, but also more time consuming.
first_guess : float
A first guess of the valid range of k values. Typically, this can
be set to the expected number of values in the observable bin.
**llh_kwargs
Keyword arguments that are passed on to the likelihood function.
Returns
-------
array_like
        The (sorted) k values at which the likelihood was evaluated.
array_like
The corresponding likelihood values to each of the (sorted) k values.
These are normalized, i.e. their sum should approach 1, but be at
least as high as the specified `coverage`.
"""
mu = int(first_guess)
prob = np.exp(llh_func(mu, **llh_kwargs))
unsorted_pdf = [prob]
ks = [mu]
max_k = mu
min_k = mu
reached_bottom = False
while prob < coverage:
if not reached_bottom:
if min_k == 0:
reached_bottom = True
else:
min_k -= 1
ks.append(min_k)
new_val = np.exp(llh_func(min_k, **llh_kwargs))
unsorted_pdf.append(
new_val)
prob += new_val
max_k += 1
ks.append(max_k)
new_val = np.exp(llh_func(max_k, **llh_kwargs))
unsorted_pdf.append(new_val)
prob += new_val
ks = np.array(ks)
unsorted_pdf = np.array(unsorted_pdf)
sort_idx = np.argsort(ks)
sorted_ks = ks[sort_idx]
sorted_pdf = unsorted_pdf[sort_idx]
return sorted_ks, sorted_pdf
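# Example (hypothetical call): normalized Poisson likelihood around an expectation
# of 7 events; any log-likelihood with signature llh(k, **kwargs) works the same way:
#
#     ks, pdf = evaluate_normalized_likelihood(
#         sc_dist.poisson.logpmf, coverage=0.999, first_guess=7, mu=7)
#     # pdf.sum() >= 0.999 and ks is the sorted range of k values that was covered.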
def map_aggarwal_ratio(y_values, y_0=1., upper=True):
"""Map p-values to relative y-values wrt minimium p-value.
The provided p-values `y_values` are mapped to relative y-values.
These transformed y-values are relative to the minimum p-value (in log10).
Depending on whether or not `upper` is True, these relative values will
either be positive or negative. In other words, the p-values are mapped
to y-values in the range of [0, 1] for upper == True and [-1, 0] for
upper == False.
Parameters
----------
y_values : array_like
The p-values for each observable bin.
Shape: [n_bins]
y_0 : float, optional
The highest possible p-value. Anything above this is set to NaN, i.e.
it will not be plotted later.
upper : bool, optional
If True, the ratios are above the expectation values, i.e. the
transformed values will be in the range of [0, 1].
If False, the ratios are below the expectation values in each bin
and the transformed values will be in the range of [-1, 0].
Returns
-------
array_like
The transformed y-values for each of the p-values `y_values`.
Shape: [n_bins]
"""
flattened_y = np.copy(y_values.reshape(np.prod(y_values.shape)))
finite = np.isfinite(flattened_y)
finite_y = flattened_y[finite]
if len(finite_y) == 0:
return y_values, 0.
finite_y[finite_y > y_0] = np.NaN
finite_y = np.log10(finite_y)
y_min = np.min(finite_y)
y_min *= 1.1
finite_y /= y_min
transformed_values = np.copy(flattened_y)
transformed_values[finite] = finite_y
is_nan = np.isnan(flattened_y)
is_pos_inf = np.isposinf(flattened_y)
is_neg_inf = np.isneginf(flattened_y)
got_divided_by_zero = flattened_y == 1.
if upper:
transformed_values[is_nan] = np.nan
transformed_values[is_pos_inf] = np.inf
transformed_values[is_neg_inf] = -np.inf
else:
transformed_values[finite] *= -1.
transformed_values[is_nan] = np.nan
transformed_values[is_pos_inf] = np.inf
transformed_values[is_neg_inf] = -np.inf
transformed_values[got_divided_by_zero] = 0
transformed_values = transformed_values.reshape(y_values.shape)
return transformed_values, y_min
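# Example (hypothetical numbers): p-values [0.1, 0.001, 1.0] are mapped to
# log10(p) / (1.1 * log10(min p)), i.e. roughly [0.30, 0.91, 0.0], with
# y_min == 1.1 * log10(0.001) == -3.3:
#
#     vals, y_min = map_aggarwal_ratio(np.array([1e-1, 1e-3, 1.0]), upper=True)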
def map_aggarwal_limits(y_values, y_0=1., upper=True):
"""Map p-values to relative y-values wrt minimium p-value.
The provided p-values `y_values` are mapped to relative y-values.
These transformed y-values are relative to the minimum p-value (in log10).
Depending on whether or not `upper` is True, these relative values will
either be positive or negative. In other words, the p-values are mapped
to y-values in the range of [0, 1] for upper == True and [-1, 0] for
upper == False.
This function is similar to `map_aggarwal_ratio`, but the handling
of positive and negative infinities are different. These are set to finite
values, such that appropriate limit contours may be drawn.
Parameters
----------
y_values : array_like
The p-values for each observable bin.
Shape: [n_bins]
y_0 : float, optional
The highest possible p-value. Anything above this is set to NaN, i.e.
it will not be plotted later.
upper : bool, optional
If True, the limits are upper limits, i.e. the
transformed values will be in the range of [0, 1].
If False, the limits are lower limits and the transformed values will
be in the range of [-1, 0].
Returns
-------
array_like
The transformed y-values for each of the p-values `y_values`.
Shape: [n_bins]
"""
flattened_y = np.copy(y_values.reshape(np.prod(y_values.shape)))
finite = np.isfinite(flattened_y)
finite_y = flattened_y[finite]
if len(finite_y) == 0:
return y_values, 0.
finite_y[finite_y > y_0] = np.NaN
finite_y = np.log10(finite_y)
y_min = np.min(finite_y)
y_min *= 1.1
finite_y /= y_min
transformed_values = np.copy(flattened_y)
transformed_values[finite] = finite_y
is_nan = np.isnan(flattened_y)
is_pos_inf = np.isposinf(flattened_y)
is_neg_inf = np.isneginf(flattened_y)
if upper:
transformed_values[is_nan] = np.nan
transformed_values[is_pos_inf] = np.inf
transformed_values[is_neg_inf] = 0.
else:
transformed_values[finite] *= -1.
transformed_values[is_nan] = np.nan
transformed_values[is_pos_inf] = -8000
transformed_values[is_neg_inf] = -8000
transformed_values = transformed_values.reshape(y_values.shape)
return transformed_values, y_min
def rescale_ratio(values, y_min, y_min_wanted):
"""Rescale relative y-values
Rescales relative y-values `values` to `y_min_wanted`. It is assumed
    that the provided values are relative to the minimum p-value as specified
in the provided `y_min`.
Similar to `rescale_limit`, but does additional handling of points that
are outside of the plot region (these are set to inf, such that they will
not be plotted).
Parameters
----------
values : array_like
The relative y-values that should be rescaled.
Shape: [n_bins]
y_min : float
The minimum p-value. This is the anchor point to which the original
p-values were scaled to, i.e. `values` are relative to this minimum
p-value.
Shape: []
    y_min_wanted : float
The desired new minimum p-value. This is the new anchor point to which
the original p-values will be re-scaled to.
Shape: []
Returns
-------
array_like
The rescaled y-values now relative to `y_min_wanted`.
Shape: [n_bins]
"""
values = np.copy(values)
finite = np.isfinite(values)
factor = y_min / y_min_wanted
values[finite] *= factor
finite_values = values[finite]
finite_values[np.absolute(finite_values) > 1] = np.inf
values[finite] = finite_values
return values
def rescale_limit(values, y_min, y_min_wanted):
"""Rescale relative y-values
Rescales relative y-values `values` to `y_min_wanted`. It is assumed
    that the provided values are relative to the minimum p-value as specified
in the provided `y_min`.
Parameters
----------
values : array_like
The relative y-values that should be rescaled.
Shape: [n_bins]
y_min : float
The minimum p-value. This is the anchor point to which the original
p-values were scaled to, i.e. `values` are relative to this minimum
p-value.
Shape: []
y_min_wanted : flaot
The desired new minimum p-value. This is the new anchor point to which
the original p-values will be re-scaled to.
Shape: []
Returns
-------
array_like
The rescaled y-values now relative to `y_min_wanted`.
Shape: [n_bins]
"""
values = np.copy(values)
finite = np.isfinite(values)
factor = y_min / y_min_wanted
values[finite] *= factor
return values
def calc_p_alpha_limits(mu, rel_std):
"""Get the CDF ratio at the limits `rel_std` in each observable bin.
Parameters
----------
mu : array_like
The expected number (Poisson mean) of events in each observable bin.
Shape: [n_bins]
rel_std : array_like
The relative limits wrt the expected number (Poisson mean) of events
in each bin, i.e. limits / mu. The last dimension corresponds to lower
and upper relative limits, respectively.
Shape: [n_bins, n_alpha, 2]
    Returns
    -------
    array_like
The ratio of the PDF tails:
P(x <= limit_i) / P(x <= mu_i) if limit_i <= mu_i
P(x > limit_i) / P(x > mu_i) if limit_i > mu_i
for each observable bin i.
The CDF P(x <= y) is calculated based on the expected number of events
in each observable bin and under the assumption of a Poisson
distribution.
This ratio reaches 1., if the measured values `k` agree well with the
expected values `mu`. The smaller this ratio is, the higher the
discrepancy.
Shape: [n_bins, n_alpha, 2]
"""
abs_std = np.zeros_like(rel_std)
limits = np.zeros_like(rel_std)
for i in range(rel_std.shape[1]):
abs_std = mu * rel_std[:, i, 0]
returned_vals = __calc_p_alpha__(mu, abs_std, upper=False)
is_nan = np.logical_or(np.isnan(abs_std), np.isnan(mu))
is_zero_mu = mu == 0.
only_zero_mu = np.logical_and(is_zero_mu, ~is_nan)
returned_vals[only_zero_mu] = -np.inf
limits[:, i, 0] = returned_vals
for i in range(rel_std.shape[1]):
abs_std = mu * rel_std[:, i, 1]
returned_vals = __calc_p_alpha__(mu, abs_std, upper=True)
is_nan = np.logical_or(np.isnan(abs_std), np.isnan(mu))
is_zero_mu = mu == 0.
only_zero_mu = np.logical_and(is_zero_mu, ~is_nan)
returned_vals[only_zero_mu] = -np.inf
limits[:, i, 1] = returned_vals
return limits
def calc_p_alpha_limits_pdf(pdfs, ks, mu, rel_std):
"""Get the CDF ratio at the limits `rel_std` in each observable bin.
Similar to `calc_p_alpha_limits`, but the CDF calculation is based on the
normalized likelihood values `pdfs` and corresponding k values `ks`.
Parameters
----------
pdfs : list of list of float
The pdf values for each feature bin and for each value k.
The value k is the observed number of events in the Poisson Likelihood.
        The number of evaluated k values is different for each observable bin,
and it is chosen such that a certain coverage is obtained.
Shape: [n_bins, n_k_values] (note that n_k_values is not constant!)
ks : list of list of int
The corresponding k value for each of the evaluated pdf values `pdfs`.
Shape: [n_bins, n_k_values] (note that n_k_values is not constant!)
mu : array_like
The expected number (Poisson mean) of events in each observable bin.
Shape: [n_bins]
rel_std : array_like
The relative limits wrt the expected number (Poisson mean) of events
in each bin, i.e. limits / mu. The last dimension corresponds to lower
and upper relative limits, respectively.
Shape: [n_bins, n_alpha, 2]
Returns
-------
array_like
The ratio of the PDF tails:
P(x <= limit_i) / P(x <= mu_i) if limit_i <= mu_i
P(x > limit_i) / P(x > mu_i) if limit_i > mu_i
for each observable bin i.
The CDF P(x <= y) is calculated based on the normalized likelihood
values `pdfs` and corresponding k values `ks`.
This ratio reaches 1., if the measured values `k` agree well with the
expected values `mu`. The smaller this ratio is, the higher the
discrepancy.
Shape: [n_bins, n_alpha, 2]
"""
abs_std = np.zeros_like(rel_std)
limits = np.zeros_like(rel_std)
for i in range(rel_std.shape[1]):
abs_std = mu * rel_std[:, i, 0]
returned_vals = __calc_p_alpha_pdf__(pdfs, ks,
mu, abs_std,
upper=False)
is_nan = np.logical_or(np.isnan(abs_std), np.isnan(mu))
is_zero_mu = mu == 0.
only_zero_mu = np.logical_and(is_zero_mu, ~is_nan)
returned_vals[only_zero_mu] = -np.inf
limits[:, i, 0] = returned_vals
for i in range(rel_std.shape[1]):
abs_std = mu * rel_std[:, i, 1]
returned_vals = __calc_p_alpha_pdf__(pdfs, ks,
mu, abs_std,
upper=True)
is_nan = np.logical_or(np.isnan(abs_std), np.isnan(mu))
is_zero_mu = mu == 0.
only_zero_mu = np.logical_and(is_zero_mu, ~is_nan)
returned_vals[only_zero_mu] = -np.inf
limits[:, i, 1] = returned_vals
return limits
def __calc_p_alpha__(mu, k, upper=True):
"""Get the CDF ratio at a given number of observed events k in each bin.
Parameters
----------
mu : array_like
The expected number (Poisson mean) of events in each observable bin.
Shape: [n_bins]
k : array_like
The measured number (Poisson k) of events in each observable bin.
The CDF ratio is evaluated at these k values.
Shape: [n_bins]
upper : bool, optional
If true, the upper PDF tail will be considered, i.e. the ratio
P(x > k_i) / P(x > mu_i) will be computed.
If false, P(x <= k_i) / P(x <= mu_i) is computed.
Returns
-------
array_like
The ratio P(x <= k_i) / P(x <= mu_i) for each observable bin i.
The CDF P(x <= y) is calculated based on the expected number of events
in each observable bin and under the assumption of a Poisson
distribution. If upper is True, then '<=' switches to '>'.
Shape: [n_bins]
"""
assert mu.shape == k.shape, 'Shape of \'mu\' and \'k\' have to be the same'
limit = np.copy(k)
is_nan = np.logical_or(np.isnan(k), np.isnan(mu))
is_finite = mu != 0.
a_ref = sc_dist.poisson.cdf(mu[is_finite], mu[is_finite])
a_k = sc_dist.poisson.cdf(k[is_finite], mu[is_finite])
if upper:
ratio = (1 - a_k) / (1 - a_ref)
ratio[1 - a_k == 0.] = np.inf
else:
ratio = a_k / a_ref
ratio[a_k == 0.] = np.inf
limit[is_finite] = ratio
limit[is_nan] = np.nan
return limit
def __calc_p_alpha_pdf__(pdfs, ks, mu, k, upper=True):
"""Get the CDF ratio at a given number of observed events k in each bin.
Similar to `__calc_p_alpha__`, but CDF is calculated based on the
computed normalized likelihood values `pdfs` and the corresponding
k values `ks`.
Parameters
----------
pdfs : list of list of float
The pdf values for each feature bin and for each value k.
The value k is the observed number of events in the Poisson Likelihood.
        The number of evaluated k values is different for each observable bin,
and it is chosen such that a certain coverage is obtained.
Shape: [n_bins, n_k_values] (note that n_k_values is not constant!)
ks : list of list of int
The corresponding k value for each of the evaluated pdf values `pdfs`.
Shape: [n_bins, n_k_values] (note that n_k_values is not constant!)
mu : array_like
The expected number (Poisson mean) of events in each observable bin.
Shape: [n_bins]
k : array_like
The measured number (Poisson k) of events in each observable bin.
The CDF ratio is evaluated at these k values.
Shape: [n_bins]
upper : bool, optional
If true, the upper PDF tail will be considered, i.e. the ratio
P(x > k_i) / P(x > mu_i) will be computed.
If false, P(x <= k_i) / P(x <= mu_i) is computed.
Returns
-------
array_like
The ratio P(x <= k_i) / P(x <= mu_i) for each observable bin i.
The CDF P(x <= y) is calculated based on the normalized likelihood
values `pdfs` and corresponding k values `ks`.
If upper is True, then '<=' switches to '>'.
Shape: [n_bins]
"""
assert mu.shape == k.shape, 'Shape of \'mu\' and \'k\' have to be the same'
limit = np.copy(k)
is_nan = np.logical_or(np.isnan(k), np.isnan(mu))
is_finite = mu != 0.
for i, (pdf, ksi) in enumerate(zip(pdfs, ks)):
cdf = np.cumsum(pdf)
if is_finite[i]:
mu_idx = np.where(ksi == int(mu[i]))[0]
if len(mu_idx) == 0:
a_ref = np.nan
else:
a_ref = cdf[mu_idx]
k_idx = np.where(ksi == int(k[i]))[0]
if len(k_idx) == 0:
if upper:
a_k = 1
else:
a_k = 0
else:
a_k = cdf[k_idx]
if upper:
if 1 - a_k == 0.:
limit[i] = np.inf
else:
ratio = (1 - a_k) / (1 - a_ref)
limit[i] = ratio
else:
if a_k == 0:
limit[i] = np.inf
else:
ratio = a_k / a_ref
limit[i] = ratio
limit[is_nan] = np.nan
return limit
def calc_p_alpha_ratio(mu, k):
"""Get the CDF ratio at the measured `k` values in each observable bin.
Parameters
----------
mu : array_like
The expected number (Poisson mean) of events in each observable bin.
Shape: [n_bins]
k : array_like
The measured number (Poisson k) of events in each observable bin.
Shape: [n_bins]
    Returns
    -------
    array_like
The ratio of the PDF tails:
P(x <= k_i) / P(x <= mu_i) if k_i <= mu_i
P(x > k_i) / P(x > mu_i) if k_i > mu_i
for each observable bin i.
The CDF P(x <= y) is calculated based on the expected number of events
in each observable bin and under the assumption of a Poisson
distribution.
This ratio reaches 1., if the measured values `k` agree well with the
expected values `mu`. The smaller this ratio is, the higher the
discrepancy.
Shape: [n_bins]
"""
is_upper = k > mu
ratio = np.zeros_like(mu)
for upper in [False, True]:
if upper:
mask = is_upper
else:
mask = ~is_upper
returned_vals = __calc_p_alpha__(mu[mask],
k[mask],
upper=upper)
is_nan = np.logical_or(np.isnan(k[mask]), np.isnan(mu[mask]))
is_zero_mu = mu[mask] == 0.
only_zero_mu = np.logical_and(is_zero_mu, ~is_nan)
returned_vals[only_zero_mu] = -np.inf
ratio[mask] = returned_vals
return ratio
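# Example (hypothetical numbers): the ratio of Poisson CDF tails at the observed
# counts `k` relative to the expectation `mu`; values near 1 indicate agreement,
# small values indicate a strong excess or deficit:
#
#     mu = np.array([10.0, 10.0])
#     k = np.array([9.0, 20.0])
#     r = calc_p_alpha_ratio(mu, k)
#     # r[0] is of order 1 (9 is close to 10); r[1] is small (20 is a large excess).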
def calc_p_alpha_ratio_pdf(pdfs, ks, mu, k):
"""Get the CDF ratio at the measured `k` values in each observable bin.
Similar to `calc_p_alpha_ratio`, but the CDF calculation is based on the
normalized likelihood values `pdfs` and corresponding k values `ks`.
Parameters
----------
pdfs : list of list of float
The pdf values for each feature bin and for each value k.
The value k is the observed number of events in the Poisson Likelihood.
        The number of evaluated k values is different for each observable bin,
and it is chosen such that a certain coverage is obtained.
Shape: [n_bins, n_k_values] (note that n_k_values is not constant!)
ks : list of list of int
The corresponding k value for each of the evaluated pdf values `pdfs`.
Shape: [n_bins, n_k_values] (note that n_k_values is not constant!)
mu : array_like
The expected number (Poisson mean) of events in each observable bin.
Shape: [n_bins]
k : array_like
The measured number (Poisson k) of events in each observable bin.
Shape: [n_bins]
Returns
-------
array_like
The ratio of the PDF tails:
P(x <= k_i) / P(x <= mu_i) if k_i <= mu_i
P(x > k_i) / P(x > mu_i) if k_i > mu_i
for each observable bin i.
The CDF P(x <= y) is calculated based on the normalized likelihood
values `pdfs` and corresponding k values `ks`.
This ratio reaches 1., if the measured values `k` agree well with the
expected values `mu`. The smaller this ratio is, the higher the
discrepancy.
Shape: [n_bins]
"""
is_upper = k > mu
    ratio = np.zeros_like(mu)
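    # A hedged sketch of the remaining body (assumed): it mirrors calc_p_alpha_ratio
    # above, but uses the pdf-based helper; itertools.compress (imported at the top)
    # is used to mask the list-valued `pdfs` and `ks` arguments.
    for upper in [False, True]:
        if upper:
            mask = is_upper
        else:
            mask = ~is_upper
        pdfs_masked = list(compress(pdfs, mask))
        ks_masked = list(compress(ks, mask))
        returned_vals = __calc_p_alpha_pdf__(pdfs_masked, ks_masked,
                                             mu[mask], k[mask], upper=upper)
        is_nan = np.logical_or(np.isnan(k[mask]), np.isnan(mu[mask]))
        is_zero_mu = mu[mask] == 0.
        only_zero_mu = np.logical_and(is_zero_mu, ~is_nan)
        returned_vals[only_zero_mu] = -np.inf
        ratio[mask] = returned_vals
    return ratio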
from __future__ import print_function
"""
Markov based methods for spatial dynamics.
"""
__author__ = "<NAME> <<EMAIL>"
__all__ = ["Markov", "LISA_Markov", "Spatial_Markov", "kullback",
"prais", "shorrock", "homogeneity"]
import numpy as np
from pysal.spatial_dynamics.ergodic import fmpt
from pysal.spatial_dynamics.ergodic import steady_state as STEADY_STATE
from scipy import stats
from operator import gt
import pysal
# TT predefine LISA transitions
# TT[i,j] is the transition type from i to j
# i = quadrant in period 0
# j = quadrant in period 1
# uses one offset so first row and col of TT are ignored
TT = np.zeros((5, 5), int)
c = 1
for i in range(1, 5):
for j in range(1, 5):
TT[i, j] = c
c += 1
# MOVE_TYPES is a dictionary that returns the move type of a LISA transition
# filtered on the significance of the LISA end points
# True indicates significant LISA in a particular period
# e.g. a key of (1, 3, True, False) indicates a significant LISA located in
# quadrant 1 in period 0 moved to quadrant 3 in period 1 but was not
# significant in quadrant 3.
MOVE_TYPES = {}
c = 1
cases = (True, False)
sig_keys = [(i, j) for i in cases for j in cases]
for i, sig_key in enumerate(sig_keys):
c = 1 + i * 16
for i in range(1, 5):
for j in range(1, 5):
key = (i, j, sig_key[0], sig_key[1])
MOVE_TYPES[key] = c
c += 1
class Markov(object):
"""
Classic Markov transition matrices.
Parameters
----------
class_ids : array
(n, t), one row per observation, one column recording the
state of each observation, with as many columns as time
periods.
classes : array
(k, 1), all different classes (bins) of the matrix.
Attributes
----------
p : matrix
(k, k), transition probability matrix.
steady_state : matrix
(k, 1), ergodic distribution.
transitions : matrix
(k, k), count of transitions between each state i and j.
Examples
--------
>>> c = [['b','a','c'],['c','c','a'],['c','b','c']]
>>> c.extend([['a','a','b'], ['a','b','c']])
>>> c = np.array(c)
>>> m = Markov(c)
>>> m.classes.tolist()
['a', 'b', 'c']
>>> m.p
matrix([[ 0.25 , 0.5 , 0.25 ],
[ 0.33333333, 0. , 0.66666667],
[ 0.33333333, 0.33333333, 0.33333333]])
>>> m.steady_state
matrix([[ 0.30769231],
[ 0.28846154],
[ 0.40384615]])
US nominal per capita income 48 states 81 years 1929-2009
>>> import pysal
>>> f = pysal.open(pysal.examples.get_path("usjoin.csv"))
>>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)])
set classes to quintiles for each year
>>> q5 = np.array([pysal.Quantiles(y).yb for y in pci]).transpose()
>>> m = Markov(q5)
>>> m.transitions
array([[ 729., 71., 1., 0., 0.],
[ 72., 567., 80., 3., 0.],
[ 0., 81., 631., 86., 2.],
[ 0., 3., 86., 573., 56.],
[ 0., 0., 1., 57., 741.]])
>>> m.p
matrix([[ 0.91011236, 0.0886392 , 0.00124844, 0. , 0. ],
[ 0.09972299, 0.78531856, 0.11080332, 0.00415512, 0. ],
[ 0. , 0.10125 , 0.78875 , 0.1075 , 0.0025 ],
[ 0. , 0.00417827, 0.11977716, 0.79805014, 0.07799443],
[ 0. , 0. , 0.00125156, 0.07133917, 0.92740926]])
>>> m.steady_state
matrix([[ 0.20774716],
[ 0.18725774],
[ 0.20740537],
[ 0.18821787],
[ 0.20937187]])
Relative incomes
>>> pci = pci.transpose()
>>> rpci = pci/(pci.mean(axis=0))
>>> rq = pysal.Quantiles(rpci.flatten()).yb
>>> rq.shape = (48,81)
>>> mq = Markov(rq)
>>> mq.transitions
array([[ 707., 58., 7., 1., 0.],
[ 50., 629., 80., 1., 1.],
[ 4., 79., 610., 73., 2.],
[ 0., 7., 72., 650., 37.],
[ 0., 0., 0., 48., 724.]])
>>> mq.steady_state
matrix([[ 0.17957376],
[ 0.21631443],
[ 0.21499942],
[ 0.21134662],
[ 0.17776576]])
"""
def __init__(self, class_ids, classes=None):
if classes is not None:
self.classes = classes
else:
self.classes = np.unique(class_ids)
n, t = class_ids.shape
k = len(self.classes)
js = range(t - 1)
classIds = self.classes.tolist()
transitions = np.zeros((k, k))
for state_0 in js:
state_1 = state_0 + 1
state_0 = class_ids[:, state_0]
state_1 = class_ids[:, state_1]
initial = np.unique(state_0)
for i in initial:
ending = state_1[state_0 == i]
uending = np.unique(ending)
row = classIds.index(i)
for j in uending:
col = classIds.index(j)
transitions[row, col] += sum(ending == j)
self.transitions = transitions
row_sum = transitions.sum(axis=1)
p = np.dot(np.diag(1 / (row_sum + (row_sum == 0))), transitions)
self.p = np.matrix(p)
@property
def steady_state(self):
if not hasattr(self, '_steady_state'):
self._steady_state = STEADY_STATE(self.p)
return self._steady_state
class Spatial_Markov(object):
"""
Markov transitions conditioned on the value of the spatial lag.
Parameters
----------
y : array
(n,t), one row per observation, one column per state of
each observation, with as many columns as time periods.
w : W
spatial weights object.
k : integer
number of classes (quantiles).
permutations : int, optional
number of permutations for use in randomization based
inference (the default is 0).
fixed : bool
If true, quantiles are taken over the entire n*t
pooled series. If false, quantiles are taken each
time period over n.
variable_name : string
name of variable.
Attributes
----------
p : matrix
(k, k), transition probability matrix for a-spatial
Markov.
s : matrix
(k, 1), ergodic distribution for a-spatial Markov.
transitions : matrix
(k, k), counts of transitions between each state i and j
for a-spatial Markov.
T : matrix
(k, k, k), counts of transitions for each conditional
Markov. T[0] is the matrix of transitions for
observations with lags in the 0th quantile; T[k-1] is the
transitions for the observations with lags in the k-1th.
P : matrix
(k, k, k), transition probability matrix for spatial
Markov first dimension is the conditioned on the lag.
S : matrix
(k, k), steady state distributions for spatial Markov.
Each row is a conditional steady_state.
F : matrix
(k, k, k),first mean passage times.
First dimension is conditioned on the lag.
shtest : list
(k elements), each element of the list is a tuple for a
multinomial difference test between the steady state
distribution from a conditional distribution versus the
overall steady state distribution: first element of the
tuple is the chi2 value, second its p-value and the third
the degrees of freedom.
chi2 : list
(k elements), each element of the list is a tuple for a
chi-squared test of the difference between the
conditional transition matrix against the overall
transition matrix: first element of the tuple is the chi2
value, second its p-value and the third the degrees of
freedom.
x2 : float
sum of the chi2 values for each of the conditional tests.
Has an asymptotic chi2 distribution with k(k-1)(k-1)
degrees of freedom. Under the null that transition
probabilities are spatially homogeneous.
(see chi2 above)
x2_dof : int
degrees of freedom for homogeneity test.
x2_pvalue : float
pvalue for homogeneity test based on analytic.
distribution
x2_rpvalue : float
(if permutations>0)
pseudo p-value for x2 based on random spatial
permutations of the rows of the original transitions.
x2_realizations : array
(permutations,1), the values of x2 for the random
permutations.
Q : float
Chi-square test of homogeneity across lag classes based
on Bickenbach and Bode (2003) [Bickenbach2003]_.
Q_p_value : float
p-value for Q.
LR : float
Likelihood ratio statistic for homogeneity across lag
                    classes based on Bickenbach and Bode (2003)
[Bickenbach2003]_.
LR_p_value : float
p-value for LR.
dof_hom : int
degrees of freedom for LR and Q, corrected for 0 cells.
Notes
-----
Based on Rey (2001) [Rey2001]_.
The shtest and chi2 tests should be used with caution as they are based on
classic theory assuming random transitions. The x2 based test is
preferable since it simulates the randomness under the null. It is an
experimental test requiring further analysis.
Examples
--------
>>> import pysal as ps
>>> f = ps.open(ps.examples.get_path("usjoin.csv"))
>>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)])
>>> pci = pci.transpose()
>>> rpci = pci/(pci.mean(axis=0))
>>> w = ps.open(ps.examples.get_path("states48.gal")).read()
>>> w.transform = 'r'
>>> sm = ps.Spatial_Markov(rpci, w, fixed=True, k=5, variable_name='rpci')
>>> for p in sm.P:
... print(p)
...
[[ 0.96341463 0.0304878 0.00609756 0. 0. ]
[ 0.06040268 0.83221477 0.10738255 0. 0. ]
[ 0. 0.14 0.74 0.12 0. ]
[ 0. 0.03571429 0.32142857 0.57142857 0.07142857]
[ 0. 0. 0. 0.16666667 0.83333333]]
[[ 0.79831933 0.16806723 0.03361345 0. 0. ]
[ 0.0754717 0.88207547 0.04245283 0. 0. ]
[ 0.00537634 0.06989247 0.8655914 0.05913978 0. ]
[ 0. 0. 0.06372549 0.90196078 0.03431373]
[ 0. 0. 0. 0.19444444 0.80555556]]
[[ 0.84693878 0.15306122 0. 0. 0. ]
[ 0.08133971 0.78947368 0.1291866 0. 0. ]
[ 0.00518135 0.0984456 0.79274611 0.0984456 0.00518135]
[ 0. 0. 0.09411765 0.87058824 0.03529412]
[ 0. 0. 0. 0.10204082 0.89795918]]
[[ 0.8852459 0.09836066 0. 0.01639344 0. ]
[ 0.03875969 0.81395349 0.13953488 0. 0.00775194]
[ 0.0049505 0.09405941 0.77722772 0.11881188 0.0049505 ]
[ 0. 0.02339181 0.12865497 0.75438596 0.09356725]
[ 0. 0. 0. 0.09661836 0.90338164]]
[[ 0.33333333 0.66666667 0. 0. 0. ]
[ 0.0483871 0.77419355 0.16129032 0.01612903 0. ]
[ 0.01149425 0.16091954 0.74712644 0.08045977 0. ]
[ 0. 0.01036269 0.06217617 0.89637306 0.03108808]
[ 0. 0. 0. 0.02352941 0.97647059]]
The probability of a poor state remaining poor is 0.963 if their
neighbors are in the 1st quintile and 0.798 if their neighbors are
in the 2nd quintile. The probability of a rich economy remaining
rich is 0.976 if their neighbors are in the 5th quintile, but if their
neighbors are in the 4th quintile this drops to 0.903.
The Q and likelihood ratio statistics are both significant indicating
the dynamics are not homogeneous across the lag classes:
>>> "%.3f"%sm.LR
'170.659'
>>> "%.3f"%sm.Q
'200.624'
>>> "%.3f"%sm.LR_p_value
'0.000'
>>> "%.3f"%sm.Q_p_value
'0.000'
>>> sm.dof_hom
60
The long run distribution for states with poor (rich) neighbors has
0.435 (0.018) of the values in the first quintile, 0.263 (0.200) in
the second quintile, 0.204 (0.190) in the third, 0.0684 (0.255) in the
fourth and 0.029 (0.337) in the fifth quintile.
>>> sm.S
array([[ 0.43509425, 0.2635327 , 0.20363044, 0.06841983, 0.02932278],
[ 0.13391287, 0.33993305, 0.25153036, 0.23343016, 0.04119356],
[ 0.12124869, 0.21137444, 0.2635101 , 0.29013417, 0.1137326 ],
[ 0.0776413 , 0.19748806, 0.25352636, 0.22480415, 0.24654013],
[ 0.01776781, 0.19964349, 0.19009833, 0.25524697, 0.3372434 ]])
States with incomes in the first quintile with neighbors in the
first quintile return to the first quintile after 2.298 years, after
leaving the first quintile. They enter the fourth quintile 80.810 years
after leaving the first quintile, on average.
Poor states with neighbors in the fourth quintile return to the
first quintile, on average, after 12.88 years, and would enter the
fourth quintile after 28.473 years.
>>> for f in sm.F:
... print(f)
...
[[ 2.29835259 28.95614035 46.14285714 80.80952381 279.42857143]
[ 33.86549708 3.79459555 22.57142857 57.23809524 255.85714286]
[ 43.60233918 9.73684211 4.91085714 34.66666667 233.28571429]
[ 46.62865497 12.76315789 6.25714286 14.61564626 198.61904762]
[ 52.62865497 18.76315789 12.25714286 6. 34.1031746 ]]
[[ 7.46754205 9.70574606 25.76785714 74.53116883 194.23446197]
[ 27.76691978 2.94175577 24.97142857 73.73474026 193.4380334 ]
[ 53.57477715 28.48447637 3.97566318 48.76331169 168.46660482]
[ 72.03631562 46.94601483 18.46153846 4.28393653 119.70329314]
[ 77.17917276 52.08887197 23.6043956 5.14285714 24.27564033]]
[[ 8.24751154 6.53333333 18.38765432 40.70864198 112.76732026]
[ 47.35040872 4.73094099 11.85432099 34.17530864 106.23398693]
[ 69.42288828 24.76666667 3.794921 22.32098765 94.37966594]
[ 83.72288828 39.06666667 14.3 3.44668119 76.36702977]
[ 93.52288828 48.86666667 24.1 9.8 8.79255406]]
[[ 12.87974382 13.34847151 19.83446328 28.47257282 55.82395142]
[ 99.46114206 5.06359731 10.54545198 23.05133495 49.68944423]
[ 117.76777159 23.03735526 3.94436301 15.0843986 43.57927247]
[ 127.89752089 32.4393006 14.56853107 4.44831643 31.63099455]
[ 138.24752089 42.7893006 24.91853107 10.35 4.05613474]]
[[ 56.2815534 1.5 10.57236842 27.02173913 110.54347826]
[ 82.9223301 5.00892857 9.07236842 25.52173913 109.04347826]
[ 97.17718447 19.53125 5.26043557 21.42391304 104.94565217]
[ 127.1407767 48.74107143 33.29605263 3.91777427 83.52173913]
[ 169.6407767 91.24107143 75.79605263 42.5 2.96521739]]
"""
def __init__(self, y, w, k=4, permutations=0, fixed=False,
variable_name=None):
self.y = y
rows, cols = y.shape
self.k = k
self.cols = cols
npa = np.array
self.fixed = fixed
self.variable_name = variable_name
if fixed:
yf = y.flatten()
yb = pysal.Quantiles(yf, k=k).yb
yb.shape = (rows, cols)
classes = yb
else:
classes = npa([pysal.Quantiles(y[:, i], k=k)
.yb for i in np.arange(cols)]).transpose()
classic = Markov(classes)
self.classes = classes
self.p = classic.p
self.transitions = classic.transitions
T, P = self._calc(y, w, classes, k=k)
self.T = T
self.P = P
if permutations:
nrp = np.random.permutation
counter = 0
x2_realizations = np.zeros((permutations, 1))
for perm in range(permutations):
T, P = self._calc(nrp(y), w, classes, k=k)
x2 = [chi2(T[i], self.transitions)[0] for i in range(k)]
x2s = sum(x2)
x2_realizations[perm] = x2s
if x2s >= self.x2:
counter += 1
self.x2_rpvalue = (counter + 1.0) / (permutations + 1.)
self.x2_realizations = x2_realizations
@property
def s(self):
if not hasattr(self, '_s'):
self._s = STEADY_STATE(self.p)
return self._s
@property
def S(self):
if not hasattr(self, '_S'):
S = np.zeros_like(self.p)
for i, p in enumerate(self.P):
S[i] = STEADY_STATE(p)
self._S = np.asarray(S)
return self._S
@property
def F(self):
if not hasattr(self, '_F'):
F = np.zeros_like(self.P)
for i, p in enumerate(self.P):
F[i] = fmpt(np.asmatrix(p))
self._F = np.asarray(F)
return self._F
# bickenbach and bode tests
@property
def ht(self):
if not hasattr(self, '_ht'):
self._ht = homogeneity(self.T)
return self._ht
@property
def Q(self):
if not hasattr(self, '_Q'):
self._Q = self.ht.Q
return self._Q
@property
def Q_p_value(self):
self._Q_p_value = self.ht.Q_p_value
return self._Q_p_value
@property
def LR(self):
self._LR = self.ht.LR
return self._LR
@property
def LR_p_value(self):
self._LR_p_value = self.ht.LR_p_value
return self._LR_p_value
@property
def dof_hom(self):
self._dof_hom = self.ht.dof
return self._dof_hom
# shtests
@property
def shtest(self):
if not hasattr(self, '_shtest'):
self._shtest = self._mn_test()
return self._shtest
@property
def chi2(self):
if not hasattr(self, '_chi2'):
self._chi2 = self._chi2_test()
return self._chi2
@property
def x2(self):
if not hasattr(self, '_x2'):
self._x2 = sum([c[0] for c in self.chi2])
return self._x2
@property
def x2_pvalue(self):
if not hasattr(self, '_x2_pvalue'):
self._x2_pvalue = 1 - stats.chi2.cdf(self.x2, self.x2_dof)
return self._x2_pvalue
@property
def x2_dof(self):
if not hasattr(self, '_x2_dof'):
k = self.k
self._x2_dof = k * (k - 1) * (k - 1)
return self._x2_dof
def _calc(self, y, w, classes, k):
ly = pysal.lag_spatial(w, y)
npa = np.array
if self.fixed:
l_classes = pysal.Quantiles(ly.flatten(), k=k).yb
l_classes.shape = ly.shape
else:
l_classes = npa([pysal.Quantiles(
                ly[:, i], k=k).yb for i in np.arange(self.cols)]).transpose()
"""
Provides a random set of utility methods.
.. include:: ../include/links.rst
"""
import warnings
from IPython import embed
import numpy as np
from scipy import sparse, linalg, stats, special, ndimage, spatial
# Only used for debugging...
from matplotlib import pyplot
from astropy.stats import sigma_clip
from ..models import geometry
# TODO: Build a Covariance class the pulls in all the covariance methods. This
# would make all the positive-definite + tracking easier.
# TODO: Add a set of weights?
def get_map_bin_transformations(spatial_shape=None, binid=None):
r"""
Construct various arrays used to convert back and forth between a 2D map
and the associated vector of (unique) binned quantities.
The input is expected to be for 2D maps with a given "spatial shape". For
the method to run, you need to provide one of the two arguments;
precedence is given to ``binid``.
Provided an independent calculation of the value in each map position,
this method provides the transformation matrix, :math:`\mathbf{T}`, used
to calculate the binned values:
.. math::
\mathbf{b} = \mathbf{T} \cdot \mathbf{m},
where :math:`\mathbf{b}` is the vector with the binned data and
:math:`\mathbf{m}` is the vector with the flattened map data.
If all spaxels are independent, :math:`\mathbf{T}` in the above operation
simply (down)selects, and possibly reorders, elements in
:math:`\mathbf{m}` to match the bin indices.
Parameters
----------
spatial_shape : :obj:`tuple`, optional
The 2D spatial shape of the mapped data. Ignored if ``binid`` is
provided.
binid : `numpy.ndarray`_, optional
The 2D array providing the 0-indexed bin ID number associated with
each map element. Bin IDs of -1 are assumed to be ignored; no bin ID
can be less than -1. Shape is ``spatial_shape`` and its size (i.e.
the number of grid points in the map) is :math:`N_{\rm spaxel}`.
Returns
-------
ubinid : `numpy.ndarray`_
1D vector with the sorted list of unique bin IDs. Shape is
:math:`(N_{\rm bin},)`. If ``binid`` is not provided, this is
returned as None.
nbin : `numpy.ndarray`_
1D vector with the number of spaxels in each bin. Shape is
:math:`(N_{\rm bin},)`. If ``binid`` is not provided, this is just a
vector of ones. The number of bins can also be determined from the
returned ``bin_transform`` array::
assert np.array_equal(nbin, np.squeeze(np.asarray(np.sum(bin_transform > 0, axis=1))))
ubin_indx : `numpy.ndarray`_
The index vector used to select the unique bin values from a
flattened map of binned data, *excluding* any element with ``binid ==
-1``. Shape is :math:`(N_{\rm bin},)`. If ``binid`` is not provided,
this is identical to ``grid_indx``. These indices can be used to
reconstruct the list of unique bins; i.e.::
assert np.array_equal(ubinid, binid.flat[ubin_indx])
grid_indx : `numpy.ndarray`_
The index vector used to select valid grid cells in the input maps;
i.e., any grid point with a valid bin ID (``binid != -1``). Shape is
:math:`(N_{\rm valid},)`. For example::
indx = binid > -1
assert np.array_equal(binid[indx], binid[np.unravel_index(grid_indx, binid.shape)])
bin_inverse : `numpy.ndarray`_
The index vector applied to recover the mapped data given the
unique quantities, when used in combination with ``grid_indx``. Shape
is :math:`(N_{\rm valid},)`. For example::
_binid = np.full(binid.shape, -1, dtype=int)
_binid[np.unravel_index(grid_indx, binid.shape)] = ubinid[bin_inverse]
assert np.array_equal(binid, _binid)
bin_transform : `scipy.sparse.csr_matrix`_
A sparse matrix that can be used to construct the binned set of
quantities from a full 2D map. See :math:`\mathbf{T}` in the method
description. Shape is :math:`(N_{\rm bin}, N_{\rm spaxel})`. Without
any weighting, :math:`\mathbf{T}` simply constructs the average of the
values within each bin of the map it is applied to. In this case (or if all of
the bins only contain a single spaxel), the following should pass::
assert np.array_equal(ubinid, bin_transform.dot(binid.ravel()).astype(int))
"""
if spatial_shape is None and binid is None:
raise ValueError('Must provide spatial_shape or binid')
_spatial_shape = spatial_shape if binid is None else binid.shape
nspax = np.prod(_spatial_shape)
grid_indx = np.arange(nspax, dtype=int)
if binid is None:
# All bins are valid and considered unique
bin_transform = sparse.coo_matrix((np.ones(np.prod(spatial_shape), dtype=float),
(grid_indx,grid_indx)),
shape=(np.prod(spatial_shape),)*2).tocsr()
return None, np.ones(nspax, dtype=int), grid_indx.copy(), grid_indx, grid_indx.copy(), \
bin_transform
# Get the indices of measurements with unique bin IDs, ignoring any
# IDs set to -1
binid_map = binid.ravel()
ubinid, ubin_indx, bin_inverse, nbin \
= np.unique(binid_map, return_index=True, return_inverse=True, return_counts=True)
if np.any(ubinid == -1):
ubinid = ubinid[1:]
ubin_indx = ubin_indx[1:]
grid_indx = grid_indx[bin_inverse > 0]
bin_inverse = bin_inverse[bin_inverse > 0] - 1
nbin = nbin[1:]
# NOTE: In most cases, ubinid[bin_inverse] is identical to bin_inverse. The
# exception is if the bin numbers are not sequential, i.e., the bin numbers
# are not identical to np.arange(nbin).
# Construct the bin transform using a sparse matrix
d,i,j = np.array([[1/nbin[i],i,j]
for i,b in enumerate(ubinid)
for j in np.where(binid_map == b)[0]]).T
bin_transform = sparse.coo_matrix((d,(i.astype(int),j.astype(int))),
shape=(ubinid.size, np.prod(_spatial_shape))).tocsr()
return ubinid, nbin, ubin_indx, grid_indx, bin_inverse, bin_transform
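# A minimal sketch (hypothetical 2x2 map with two bins and one masked spaxel)
# of how the returned arrays are used: bin_transform averages map values into
# the binned vector, and grid_indx/bin_inverse put binned values back onto the
# map grid.
def _example_bin_transformations():
    binid = np.array([[0, 0],
                      [1, -1]])
    ubinid, nbin, ubin_indx, grid_indx, bin_inverse, bin_transform \
            = get_map_bin_transformations(binid=binid)
    mapdata = np.array([[1., 3.],
                        [5., 7.]])
    # Average the map values that fall in each bin: [2., 5.]
    binned = bin_transform.dot(mapdata.ravel())
    # Re-map the binned values onto the grid; the masked spaxel stays NaN.
    remapped = np.full(binid.shape, np.nan)
    remapped[np.unravel_index(grid_indx, binid.shape)] = binned[bin_inverse]
    return binned, remapped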
def impose_positive_definite(mat, min_eigenvalue=1e-10, renormalize=True):
"""
Force a matrix to be positive definite.
Following, e.g.,
http://comisef.wikidot.com/tutorial:repairingcorrelation, the algorithm
is as follows:
- Calculate the eigenvalues and eigenvectors of the provided matrix
(this is the most expensive step).
- Impose a minimum eigenvalue (see ``min_eigenvalue``)
- Reconstruct the input matrix using the eigenvectors and the
adjusted eigenvalues
- Renormalize the reconstructed matrix such that its diagonal is identical
to the input matrix, if requested.
Args:
mat (`scipy.sparse.csr_matrix`_):
The matrix to force to be positive definite.
min_eigenvalue (:obj:`float`, optional):
The minimum allowed matrix eigenvalue.
renormalize (:obj:`bool`, optional):
Include the renormalization (last) step in the list above.
Returns:
`scipy.sparse.csr_matrix`_: The modified matrix.
"""
if not isinstance(mat, sparse.csr_matrix):
raise TypeError('Must provide a scipy.sparse.csr_matrix to impose_positive_definite.')
# Get the eigenvalues/eigenvectors
# WARNING: I didn't explore why too deeply, but scipy.sparse.linalg.eigs
# provided *significantly* different results. They also seem to be worse in
# the sense that the reconstructed matrix based on the adjusted eigenvalues
# is more different than input matrix compared to the use of
# numpy.linalg.eig.
# NOTE: This command can take a while, depending on the size of the
# array...
w, v = map(lambda x : np.real(x), np.linalg.eig(mat.toarray()))
if np.all(w > 0):
# Already positive definite
return mat
# Force a minimum eigenvalue
w = np.maximum(w, min_eigenvalue)
# Reconstruct with the new eigenvalues
_mat = np.dot(v, np.dot(np.diag(w), v.T))
if not renormalize:
return sparse.csr_matrix(_mat)
# Renormalize
d = mat.diagonal()
t = 1./np.sqrt(np.diag(_mat))
return sparse.csr_matrix(_mat * np.outer(t,t) * np.sqrt(np.outer(d,d)))
def is_positive_definite(mat, quiet=True):
r"""
Check if a matrix is positive definite.
This is done by calculating the eigenvalues and eigenvectors of the
provided matrix and checking if all the eigenvalues are :math:`>0`.
Because of that, it is nearly as expensive as just calling
:func:`impose_positive_definite`.
Args:
mat (`scipy.sparse.csr_matrix`_):
The matrix to check.
quiet (:obj:`bool`, optional):
Suppress terminal output.
Returns:
:obj:`bool`: Flag that matrix is positive definite.
"""
if not isinstance(mat, sparse.csr_matrix):
raise TypeError('Must provide a scipy.sparse.csr_matrix to is_positive_definite.')
# Get the eigenvalues/eigenvectors
w, v = map(lambda x : np.real(x), np.linalg.eig(mat.toarray()))
notpos = np.logical_not(w > 0)
if not quiet:
if np.any(notpos):
warnings.warn(f'{np.sum(notpos)} eigenvalues are not positive!')
print('{0:>6} {1:>8}'.format('Index', 'EigenVal'))
for i in np.where(notpos)[0]:
print('{0:>6} {1:8.2e}'.format(i, w[i]))
return not np.any(notpos)
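# A minimal sketch (hypothetical 2x2 indefinite matrix) combining the two
# helpers above; the large min_eigenvalue is only chosen so the repaired
# matrix is comfortably positive definite for the example.
def _example_positive_definite():
    bad = sparse.csr_matrix(np.array([[1., 2.],
                                      [2., 1.]]))    # eigenvalues 3 and -1
    fixed = impose_positive_definite(bad, min_eigenvalue=0.1)
    # Expect False for the input and True for the repaired matrix.
    return is_positive_definite(bad), is_positive_definite(fixed)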
def cinv(mat, check_finite=False, upper=False):
r"""
Use Cholesky decomposition to invert a matrix.
Args:
mat (`numpy.ndarray`_, `scipy.sparse.csr_matrix`_):
The array to invert.
check_finite (:obj:`bool`, optional):
Check that all the elements of ``mat`` are finite. See
`scipy.linalg.cholesky`_ and `scipy.linalg.solve_triangular`_.
upper (:obj:`bool`, optional):
Return only the upper triangle matrix that can be used to
construct the inverse matrix. I.e., for input matrix
:math:`\mathbf{M}`, this returns matrix :math:`\mathbf{U}` such
that :math:`\mathbf{M}^{-1} = \mathbf{U} \mathbf{U}^T`.
Returns:
`numpy.ndarray`_: Inverse or upper-triangle decomposition of the input
matrix, depending on ``upper``.
"""
_mat = mat.toarray() if isinstance(mat, sparse.csr.csr_matrix) else mat
# This uses scipy.linalg, not numpy.linalg
cho = linalg.cholesky(_mat, check_finite=check_finite)
# Returns an upper triangle matrix that can be used to construct the inverse matrix (see below)
cho = linalg.solve_triangular(cho, np.identity(cho.shape[0]), check_finite=check_finite)
# TODO: Make it a sparse matrix if upper?
return cho if upper else np.dot(cho, cho.T)
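# A minimal sketch (hypothetical symmetric positive-definite matrix) checking
# the Cholesky-based inverse against numpy.linalg.inv, and the upper-triangle
# factor U for which inv(M) = U @ U.T.
def _example_cinv():
    m = np.array([[4., 1.],
                  [1., 3.]])
    inv = cinv(m)
    u = cinv(m, upper=True)
    return np.allclose(inv, np.linalg.inv(m)), np.allclose(u @ u.T, inv)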
def boxcar_replicate(arr, boxcar):
"""
Boxcar replicate an array.
Args:
arr (`numpy.ndarray`_):
Array to replicate.
boxcar (:obj:`int`, :obj:`tuple`):
Integer number of times to replicate each pixel. If a
single integer, all axes are replicated the same number
of times. If a :obj:`tuple`, the integer is defined
separately for each array axis; length of tuple must
match the number of array dimensions.
Returns:
`numpy.ndarray`_: The block-replicated array.
"""
# Check and configure the input
_boxcar = (boxcar,)*arr.ndim if isinstance(boxcar, int) else boxcar
if not isinstance(_boxcar, tuple):
raise TypeError('Input `boxcar` must be an integer or a tuple.')
if len(_boxcar) != arr.ndim:
raise ValueError('Must provide an integer or tuple with one number per array dimension.')
# Perform the boxcar average over each axis and return the result
_arr = arr.copy()
for axis, box in zip(range(arr.ndim), _boxcar):
_arr = np.repeat(_arr, box, axis=axis)
return _arr
def inverse(array):
"""
Calculate ``1/array``, enforcing positivity and setting values <= 0 to
zero.
The input array should be a quantity expected to always be positive, like
a variance or an inverse variance. The quantity returned is::
out = (array > 0.0)/(np.abs(array) + (array == 0.0))
Args:
array (`numpy.ndarray`_):
Array to element-wise invert
Returns:
`numpy.ndarray`: The result of the element-wise inversion.
"""
return (array > 0.0)/(np.abs(array) + (array == 0.0))
def sigma_clip_stdfunc_mad(data, **kwargs):
"""
A simple wrapper for `scipy.stats.median_abs_deviation`_ that omits NaN
values and rescales the output to match a normal distribution for use in
`astropy.stats.sigma_clip`_.
Args:
data (`numpy.ndarray`_):
Data to clip.
**kwargs:
Passed directly to `scipy.stats.median_abs_deviation`_.
Returns:
scalar-like, `numpy.ndarray`_: See `scipy.stats.median_abs_deviation`_.
"""
return stats.median_abs_deviation(data, **kwargs, nan_policy='omit', scale='normal')
# TODO: Instead apply eps to the error (i.e., we don't want the weight to be
# large)?
def construct_ivar_weights(error, eps=None):
r"""
Produce inverse-variance weights based on the input errors.
Weights are set to 0 if the error is :math:`<=0` or if the inverse
variance is less than ``eps``.
Args:
error (`numpy.ndarray`_):
Error to use to construct weights.
eps (:obj:`float`, optional):
The minimum allowed weight. Any weight (inverse variance) below
this value is set to 0. If None, no minimum to the inverse
variance is enforced.
Returns:
`numpy.ndarray`_: The inverse variance weights.
"""
indx = error > 0
wgts = np.zeros(error.shape, dtype=float)
wgts[indx] = 1.0/error[indx]**2
if eps is not None:
wgts[wgts < eps] = 0.
return wgts
# TODO: Allow one to include covariance in all the stats functions below?
def aggregate_stats(x, y, ye=None, wgts=None, gpm=None, eps=None, fill_value=None):
"""
Construct a set of aggregate statistics for the provided data.
Args:
x (`numpy.ndarray`_):
Independent coordinates
y (`numpy.ndarray`_):
Dependent coordinates
ye (`numpy.ndarray`_, optional):
Errors in the dependent coordinates. Used to construct inverse
variance weights. If not provided, no inverse-variance weights
are applied.
wgts (`numpy.ndarray`_, optional):
Weights to apply. Ignored if errors are provided. If None and no
errors are provided (``ye``), uniform weights are applied.
gpm (`numpy.ndarray`_, optional):
Good-pixel mask used to select data to include. If None, all data
are included.
eps (:obj:`float`, optional):
Minimum allowed weight. Any weight below this value is set to 0.
fill_value (:obj:`float`, optional):
If the statistics cannot be determined, replace the output with
this fill value.
Returns:
:obj:`tuple`: The unweighted median y value, the unweighted median
absolute deviation rescaled to match the standard deviation, the
unweighted mean x, the unweighted mean y, the unweighted standard
deviation of y, the error-weighted mean x, the error-weighted mean y,
the error-weighted standard deviation of y, the error-weighted error
in the mean y, the number of data points aggregated (any value with a
non-zero weight), and a boolean `numpy.ndarray`_ with flagging the
data included in the calculation.
"""
# Weights
_wgts = (np.ones(x.size, dtype=float) if wgts is None else wgts) \
if ye is None else construct_ivar_weights(ye, eps=eps)
indx = _wgts > 0
if gpm is not None:
indx &= gpm
# Number of aggregated data points
nbin = np.sum(indx)
if nbin == 0:
# Default values are all set to None
return (fill_value,)*9 + (0, indx)
# Unweighted statistics
uwmed = np.median(y[indx])
uwmad = sigma_clip_stdfunc_mad(y[indx])
uwxbin = np.mean(x[indx])
uwmean = np.mean(y[indx])
uwsdev = np.sqrt(np.dot(y[indx]-uwmean,y[indx]-uwmean)/(nbin-1)) if nbin > 1 else fill_value
# Weighted statistics
# TODO: Include covariance
wsum = np.sum(_wgts[indx])
ewxbin = np.dot(_wgts[indx],x[indx])/wsum
ewmean = np.dot(_wgts[indx],y[indx])/wsum
ewsdev = np.dot(_wgts[indx],y[indx]**2)/wsum - ewmean**2
ewsdev = fill_value if ewsdev < 0 or nbin <= 1 else np.sqrt(ewsdev*nbin/(nbin-1))
ewerr = np.sqrt(1./wsum)
return uwmed, uwmad, uwxbin, uwmean, uwsdev, ewxbin, ewmean, ewsdev, ewerr, nbin, indx
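# A minimal sketch (hypothetical data with one poorly measured outlier)
# contrasting the unweighted and error-weighted aggregates returned above.
def _example_aggregate_stats():
    x = np.array([0., 1., 2., 3.])
    y = np.array([1., 2., 4., 100.])
    ye = np.array([0.1, 0.1, 0.1, 10.])    # the outlier has a large error
    uwmed, uwmad, uwxbin, uwmean, uwsdev, ewxbin, ewmean, ewsdev, ewerr, nbin, indx \
            = aggregate_stats(x, y, ye=ye)
    # The unweighted mean is pulled to ~26.75; the error-weighted mean stays
    # close to the three well-measured points.
    return uwmean, ewmean, nbin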
def _select_rej_stat(rej_stat, ewmean, ewsdev, uwmean, uwsdev, uwmed, uwmad):
"""
Select and return the desired rejection statistic.
"""
if rej_stat == 'ew':
return ewmean, ewsdev
if rej_stat == 'uw':
return uwmean, uwsdev
if rej_stat == 'ro':
return uwmed, uwmad
raise ValueError('rej_stat must be ew, uw, or ro.')
def clipped_aggregate_stats(x, y, ye=None, wgts=None, gpm=None, eps=None, fill_value=None,
sig_rej=None, rej_stat='ew', maxiter=None):
"""
Construct a set of aggregate statistics for the provided data with
iterative rejection.
This method iteratively executes :func:`aggregate_stats` with rejection
iterations. If ``sig_rej`` is None, this is identical to a single
execution of :func:`aggregate_stats`.
Args:
x (`numpy.ndarray`_):
Independent coordinates
y (`numpy.ndarray`_):
Dependent coordinates
ye (`numpy.ndarray`_, optional):
Errors in the dependent coordinates. Used to construct inverse
variance weights. If not provided, no inverse-variance weights
are applied.
wgts (`numpy.ndarray`_, optional):
Weights to apply. Ignored if errors are provided. If None and no
errors are provided (``ye``), uniform weights are applied.
gpm (`numpy.ndarray`_, optional):
Good-pixel mask used to select data to include. If None, all data
are included.
eps (:obj:`float`, optional):
Minimum allowed weight. Any weight below this value is set to 0.
fill_value (:obj:`float`, optional):
If the statistics cannot be determined, replace the output with
this fill value.
sig_rej (:obj:`float`, optional):
The symmetric rejection threshold in units of the standard
deviation. If None, no rejection is performed.
        rej_stat (:obj:`str`, optional):
The statistic to use when determining which values to reject.
Allowed options are:
- 'ew': Use the error-weighted mean and standard deviation
- 'uw': Use the unweighted mean and standard deviation
            - 'ro': Use the robust statistics, the unweighted median and
median absolute deviation (where the latter is normalized
to nominally match the standard deviation)
maxiter (:obj:`int`, optional):
Maximum number of rejection iterations; ``maxiter = 1`` means
there are *no* rejection iterations. If None, iterations continue
until no more data are rejected.
Returns:
:obj:`tuple`: The unweighted median y value, the unweighted median
absolute deviation rescaled to match the standard deviation, the
unweighted mean x, the unweighted mean y, the unweighted standard
deviation of y, the error-weighted mean x, the error-weighted mean y,
the error-weighted standard deviation of y, the error-weighted error
in the mean y, and the number of data points aggregated (any value
with a non-zero weight).
"""
# Run the first iteration. The weights and good-pixel mask are defined here
# so that they don't need to be redetermined for each call to
# aggregate_stats
_wgts = (np.ones(x.size, dtype=float) if wgts is None else wgts) \
if ye is None else construct_ivar_weights(ye, eps=eps)
_gpm = _wgts > 0
if gpm is not None:
_gpm &= gpm
# Get the stats
uwmed, uwmad, uwxbin, uwmean, uwsdev, ewxbin, ewmean, ewsdev, ewerr, nbin, new_gpm \
= aggregate_stats(x, y, wgts=_wgts, gpm=_gpm, fill_value=fill_value)
if nbin == 0 or sig_rej is None or maxiter == 1:
        # If there were no data included or the rejection sigma is not
# provided, then we're done
return uwmed, uwmad, uwxbin, uwmean, uwsdev, ewxbin, ewmean, ewsdev, ewerr, nbin, new_gpm
_gpm &= new_gpm
i = 1
while maxiter is None or i < maxiter:
        mean, sigma = _select_rej_stat(rej_stat, ewmean, ewsdev, uwmean, uwsdev, uwmed, uwmad)
rej = (y > mean + sig_rej*sigma) | (y < mean - sig_rej*sigma)
if not np.any(rej):
# Nothing was rejected so we're done
return uwmed, uwmad, uwxbin, uwmean, uwsdev, ewxbin, ewmean, ewsdev, ewerr, nbin, _gpm
# Include the rejection in the good-pixel mask
_gpm &= np.logical_not(rej)
uwmed, uwmad, uwxbin, uwmean, uwsdev, ewxbin, ewmean, ewsdev, ewerr, nbin, new_gpm \
= aggregate_stats(x, y, wgts=_wgts, gpm=_gpm, fill_value=fill_value)
_gpm &= new_gpm
        i += 1
    # Maximum number of iterations reached; return the most recent statistics.
    return uwmed, uwmad, uwxbin, uwmean, uwsdev, ewxbin, ewmean, ewsdev, ewerr, nbin, _gpm
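# A minimal sketch (hypothetical data with a single strong outlier) of the
# iterative rejection above: with rej_stat='ro' the robust statistics are used
# for clipping, so the outlier is rejected after the first iteration.
def _example_clipped_aggregate_stats():
    rng = np.random.default_rng(42)
    y = rng.normal(0., 1., 100)
    y[0] = 50.
    x = np.arange(y.size, dtype=float)
    result = clipped_aggregate_stats(x, y, sig_rej=5., rej_stat='ro')
    nbin, gpm = result[-2], result[-1]
    # Expect 99 points kept and the first point flagged as rejected.
    return nbin, gpm[0]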
def bin_stats(x, y, bin_center, bin_width, ye=None, wgts=None, gpm=None, eps=None, fill_value=None,
sig_rej=None, rej_stat='ew', maxiter=None):
r"""
Compute aggregate statistics for a set of bins.
This method runs :func:`clipped_aggregate_stats` on the data in each bin.
The bin centers and widths must be pre-defined. Bins are allowed to
overlap.
Args:
x (`numpy.ndarray`_):
Independent coordinates
y (`numpy.ndarray`_):
Dependent coordinates
bin_center (`numpy.ndarray`_):
The set of independent coordinates for the center of each bin.
bin_width (`numpy.ndarray`_):
The width of each bin.
ye (`numpy.ndarray`_, optional):
Errors in the dependent coordinates. Used to construct inverse
variance weights. If not provided, no inverse-variance weights
are applied.
wgts (`numpy.ndarray`_, optional):
Weights to apply. Ignored if errors are provided. If None and no
errors are provided (``ye``), uniform weights are applied.
gpm (`numpy.ndarray`_, optional):
Good-pixel mask used to select data to include. If None, all data
are included.
eps (:obj:`float`, optional):
Minimum allowed weight. Any weight below this value is set to 0.
fill_value (:obj:`float`, optional):
If the statistics cannot be determined, replace the output with
this fill value.
sig_rej (:obj:`float`, optional):
The symmetric rejection threshold in units of the standard
deviation. If None, no rejection is performed.
        rej_stat (:obj:`str`, optional):
The statistic to use when determining which values to reject.
Allowed options are:
- 'ew': Use the error-weighted mean and standard deviation
- 'uw': Use the unweighted mean and standard deviation
            - 'ro': Use the robust statistics, the unweighted median and
median absolute deviation (where the latter is normalized
to nominally match the standard deviation)
maxiter (:obj:`int`, optional):
Maximum number of rejection iterations; ``maxiter = 1`` means
there are *no* rejection iterations. If None, iterations continue
until no more data are rejected.
Returns:
:obj:`tuple`: Thirteen `numpy.ndarray`_ objects are returned: The
coordinate of the bin centers (this is just the input ``bin_centers``
array), the unweighted median y value, the unweighted median absolute
deviation rescaled to match the standard deviation, the unweighted
mean x, the unweighted mean y, the unweighted standard deviation of
y, the error-weighted mean x, the error-weighted mean y, the
error-weighted standard deviation of y, the error-weighted error in
the mean y, the total number of data points in the bin (this excludes
any data that are masked on input either because ``ye`` or ``wgts`` is
not larger than 0 or ``gpm`` is False), the number of data points
used in the aggregated statistics, and a boolean array selecting data
that were included in any bin. The shape of all arrays is the same as
the input ``bin_centers``, except for the last array which is the
same shape as the input ``x``.
"""
# Setup the weights and good-pixel mask for all of the data here so that
# they don't need to be redetermined for each call to aggregate_stats.
_wgts = (np.ones(x.size, dtype=float) if wgts is None else wgts) \
if ye is None else construct_ivar_weights(ye, eps=eps)
_gpm = _wgts > 0
if gpm is not None:
_gpm &= gpm
# Setup the output arrays
nbins = bin_center.size
uwxbin = np.zeros(nbins, dtype=float)
uwmed = np.zeros(nbins, dtype=float)
uwmad = np.zeros(nbins, dtype=float)
uwmean = np.zeros(nbins, dtype=float)
uwsdev = np.zeros(nbins, dtype=float)
    ewxbin = np.zeros(nbins, dtype=float)
# encoding: utf-8
# pylint: disable=C0103
# pylint: disable=too-many-arguments
"""
Util
====
Signal segmentation
-------------------
.. autosummary::
:toctree: generated/
segmentSignal
beat2signal
get_time_segment
Time-frequency
------------------
.. autosummary::
:toctree: generated/
STFT
fft2mel
hz2mel
mel2hz
Miscellaneous
-------------
.. autosummary::
:toctree: generated/
example_audio_file
example_beats_file
find_nearest
deltas
"""
import warnings
import numpy as np
import scipy.signal
import scipy.fftpack as fft
from scipy.stats import pearsonr
import pkg_resources
from .exceptions import ParameterError
import os
import json
from pathlib import Path
from pkg_resources import resource_filename
import pooch
from . import version
# Instantiate the pooch
__data_path = os.environ.get("CARAT_DATA_DIR", pooch.os_cache("carat"))
__GOODBOY = pooch.create(
__data_path, base_url=f"https://github.com/mrocamora/carat/raw/{version.version}/examples/data/", registry=None
)
__GOODBOY.load_registry(
pkg_resources.resource_stream(__name__, str(Path("example_data") / "registry.txt"))
)
with open(
resource_filename(__name__, str(Path("example_data") / "index.json")), "r"
) as fdesc:
__TRACKMAP = json.load(fdesc)
__all__ = ['find_nearest', 'STFT', 'hz2mel', 'mel2hz', 'deltas']
def find_nearest(array, value):
"""Find index of the nearest value of an array to a given value
Parameters
----------
array : np.ndarray
input array
value : float
value
Returns
-------
idx : int
index of nearest value in the array
"""
idx = (np.abs(array-value)).argmin()
return idx
def STFT(x, window_length, hop, windowing_function=np.hanning, dft_length=None,
zp_flag=False):
""" Calculates the Short-Time Fourier Transform a signal.
Given an input signal, it calculates the DFT of frames of the signal and stores them
in bi-dimensional Scipy array.
Parameters
----------
    x : np.ndarray
        one-dimensional input signal.
    window_length : int
        length of the window in samples (must be positive; it is increased by
        one if an even value is given).
    hop : int
        frame hop between adjacent frames in samples.
    windowing_function : callable
        a callable object that receives the window length in samples and
        returns a numpy array containing the windowing function samples.
    dft_length : int, optional
        length of the DFT; defaults to ``window_length + 1``.
zp_flag : bool
a flag indicating if the *Zero-Phase Windowing* should be performed.
Returns
-------
    spec : np.array
        complex spectrogram, with one DFT frame per column.
    time : np.array
        time index (in samples) of the center of each frame.
    frequency : np.array
        normalized frequency axis of the DFT bins.
"""
# Checking input:
if x.ndim != 1:
raise AttributeError("Data must be one-dimensional.")
# Window length must be odd:
if window_length%2 == 0:
window_length = window_length + 1
# DFT length is equal the window_len+1 (always even)
if dft_length is None:
dft_length = window_length + 1
# If dft_length was set by the user, it should always be larger than the window length.
if dft_length < window_length:
warnings.warn("DFT length is smaller than window length.", RuntimeWarning)
# Partitioning the input signal:
part_sig = segmentSignal(x, window_length, hop)
no_cols = part_sig.shape[1]
# Applying the window:
window = windowing_function(window_length)
win_sig = part_sig * np.transpose(np.tile(window, (no_cols, 1)))
# Zero-phase windowing:
if zp_flag:
win_sig = fft.fftshift(win_sig, axes=0)
# Taking the FFT of the partitioned signal
spec = fft.fftshift(fft.fft(win_sig, n=dft_length, axis=0), axes=0)
# Normalizing energy
spec /= np.sum(window)
# Calculating time and frequency indices for the data
frequency = fft.fftshift(fft.fftfreq(dft_length))
time = np.arange(no_cols)*float(hop) + ((window_length-1)/2)
# Creating output spectrogram
return spec, time, frequency
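# A minimal sketch (hypothetical sine input) of calling STFT; the window
# length and hop are given in samples here, consistent with how segmentSignal
# (below) indexes the signal.
def _example_stft():
    sr = 8000
    t = np.arange(sr) / sr
    x = np.sin(2 * np.pi * 440. * t)
    spec, times, freqs = STFT(x, window_length=255, hop=128)
    # spec has one DFT frame per column; freqs is the normalized frequency axis.
    return spec.shape, times[:3], freqs.min(), freqs.max()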
def segmentSignal(signal, window_len, hop):
""" Segmentation of an array-like input:
Given an array-like, this function calculates the DFT of frames of the signal and stores them
in bi-dimensional Scipy array.
Parameters
----------
signal : array-like
object to be windowed. Must be a one-dimensional array-like object.
window_len : int
window size in samples.
hop : int
frame hop between adjacent frames in seconds.
Returns
-------
part_sig : np.array
2-D array containing the windowed signal.
Notes
-----
Each element of the output array X can be defined as:
X[m,n] = x[n+Hm]
where H is the hop in samples, 0 <= n < N, N = window_len, and 0 <= m < floor((len(x)-N)/H) + 1.
Raises
------
AttributeError if signal is not one-dimensional.
ValueError if window_len or hop are not strictly positives.
"""
if(window_len <= 0 or hop <= 0):
raise ValueError("window_len and hop values must be strictly positive numbers.")
if signal.ndim != 1:
raise AttributeError("Input signal must be one dimensional.")
# Calculating the number of columns:
no_cols = int(np.floor((np.size(signal)-window_len)/float(hop))+1)
# Windowing indices (which element goes to which position in the windowed matrix).
    # Columns: within-frame sample index n (following X[m, n] = x[n + H*m] above).
    ind_col = np.tile(np.arange(window_len, dtype=np.uint64), (no_cols, 1)).transpose()
    # Rows: frame offsets H*m.
    ind_line = np.tile(np.arange(no_cols, dtype=np.uint64) * hop, (window_len, 1))
    # Partitioned signal, shape (window_len, no_cols).
    part_sig = signal[ind_col + ind_line]
    return part_sig
# note: documentation not written yet
import time
import numba
import numpy as np
from scipy import optimize
from scipy import interpolate
from types import SimpleNamespace
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("whitegrid")
prop_cycle = plt.rcParams["axes.prop_cycle"]
colors = prop_cycle.by_key()["color"]
class AiyagariModel:
############
# 1. setup #
############
def __init__(self, name="", **kwargs):
self.name = name
self.setup_parameters()
self.update_parameters(kwargs)
self.setup_primitive_functions()
self.setup_misc()
def setup_parameters(self):
# a. model parameters
self.beta = 0.96 # discount factor
self.delta = 0.08 # depreciation rate
self.sigma = 4 # crra coefficient
self.alpha = 1 / 3 # cobb-douglas coeffient
# b. solution
self.tol_cfunc_inf = 1e-6 # tolerance for consumption function
self.cfunc_maxiter = (
2000
) # maximum number of iterations when finding consumption function
# income
self.unemp_p = 0.05 # unemployment probability
self.unemp_b = 0.15 # unemployment benefits
self.Nz = 2 # number of productivity states
self.grid_z = np.array([0.90, 1.10]) # productivity values
self.trans_p_z = np.array(
[[0.95, 0.05], [0.05, 0.95]]
) # transition probabilities
# end-of-period assets grid
self.Na = 200
self.a_min = 0
self.a_max = 20
self.a_phi = 1.1
# cash-on-hand grid
self.Nm = 500
self.m_max = 20
self.m_phi = 1.1
# c. simulation
self.seed = 2018
# d. steady state
self.ss_R_tol = 1e-7 # tolerance for finding interest rate
self.ss_a0 = 4.0 # initial cash-on-hand (homogenous)
self.ss_simN = 50000 # number of households
self.ss_simT = 2000 # number of time-periods
self.ss_sim_burnin = 1000 # burn-in periods before calculating average savings
# e. transition path
self.transN = 50000 # number of households
self.transT = 200 # number of periods
self.trans_maxiter = 200
self.trans_tol = 1e-4 # tolerance for convergence
def update_parameters(self, kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def setup_primitive_functions(self):
eps = 1e-8
# a. utility function
if self.sigma == 1:
self.u = lambda c: np.log(np.fmax(c, eps))
else:
self.u = lambda c: np.fmax(c, eps) ** (1 - self.sigma) / (1 - self.sigma)
self.u_prime = lambda c: np.fmax(c, eps) ** (-self.sigma)
self.u_prime_inv = lambda x: x ** (-1 / self.sigma)
# b. production function
self.f = lambda k: np.fmax(k, eps) ** self.alpha
self.f_prime = lambda k: self.alpha * np.fmax(k, eps) ** (self.alpha - 1)
self.f_prime_inv = lambda x: (np.fmax(x, eps) / self.alpha) ** (
1 / (self.alpha - 1)
)
def setup_misc(self):
def nonlinspace(
min_val, max_val, num, phi
): # phi up, more points close to min_val
x = np.zeros(num)
x[0] = min_val
for i in range(1, num):
x[i] = x[i - 1] + (max_val - x[i - 1]) / (num - i) ** phi
return x
# a. grids
self.grid_a = nonlinspace(self.a_min, self.a_max, self.Na, self.a_phi)
self.grid_m = nonlinspace(0, self.m_max, self.Nm, self.m_phi)
# b. initial distribution of z
        # stationary distribution of z, approximated by the 1000-step transition matrix
        z_diag = np.diag(np.linalg.matrix_power(self.trans_p_z, 1000))
self.ini_p_z = z_diag / np.sum(z_diag)
avg_z = np.sum(self.grid_z * self.ini_p_z)
self.grid_z = self.grid_z / avg_z # force mean one
# c. bounds on interst factor
self.R_high = 1 / self.beta + 0.005
self.R_low = 1 / self.beta - 0.005
# d. misc
self.c_transition_path = np.empty((1, 1, 1)) # raw allocate
######################
# 2. model functions #
######################
def R_func(self, k):
return 1 + self.f_prime(k) - self.delta
def w_func(self, k):
return self.f(k) - self.f_prime(k) * k
def w_from_R_func(self, R):
k = self.f_prime_inv(R - 1 + self.delta)
return self.w_func(k)
############
# 3. solve #
############
def solve_step(self, c_plus_interp, R, w):
c_func = []
for i_z in range(self.Nz):
# a. find next-period average marginal utility
avg_marg_u_plus = np.zeros(self.Na)
for i_zplus in range(self.Nz):
for u in [0, 1]:
# i. future cash-on-hand
if u == 0:
m_plus = R * self.grid_a + w * (
self.grid_z[i_zplus] - self.unemp_p * self.unemp_b
) / (1 - self.unemp_p)
else:
m_plus = R * self.grid_a + w * self.unemp_b
# ii. future consumption
c_plus = c_plus_interp[i_zplus](m_plus)
# iii. future marginal utility
marg_u_plus = self.u_prime(c_plus)
# iv. accumulate average marginal utility
weight = self.trans_p_z[i_z, i_zplus]
if u == 0:
weight *= 1 - self.unemp_p
else:
weight *= self.unemp_p
avg_marg_u_plus += weight * marg_u_plus
# b. find current consumption and cash-on-hand
c = self.u_prime_inv(R * self.beta * avg_marg_u_plus)
m = self.grid_a + c
m = np.insert(m, 0, 0) # add 0 in beginning
c = np.insert(c, 0, 0) # add 0 in beginning
# c. interpolate to common grid
c_raw_func = interpolate.RegularGridInterpolator(
[m], c, method="linear", bounds_error=False, fill_value=None
)
# d. construct interpolator at common grid
c_func_now = interpolate.RegularGridInterpolator(
[self.grid_m],
c_raw_func(self.grid_m),
method="linear",
bounds_error=False,
fill_value=None,
)
c_func.append(c_func_now)
return c_func
def solve_inf_horizon(self):
# a. initial guess (consume everything)
c_func_inf = []
for i_z in range(self.Nz):
# i. consume everything
m = self.grid_m
c = m
# ii. create linear interpolator
interp = interpolate.RegularGridInterpolator(
[m], c, method="linear", bounds_error=False, fill_value=None
)
# iii. append
c_func_inf.append(interp)
# b. solve household problem
diff_cfunc = np.inf
it = 0
while diff_cfunc > self.tol_cfunc_inf:
it += 1
# i. remember previous
c_func_inf_old = c_func_inf
# ii. solve one step further
c_func_inf = self.solve_step(c_func_inf_old, self.R_ss, self.w_ss)
# iii. maximum absolute difference
diff_cfunc = []
for i_z in range(self.Nz):
diff_cfunc.append(
np.amax(np.abs(c_func_inf_old[i_z].values - c_func_inf[i_z].values))
)
diff_cfunc = max(diff_cfunc)
# iv. do not reach 2000 iterations
if it > self.cfunc_maxiter:
break
# c. save interpolators
self.c_func_inf = c_func_inf
# d. save values
self.c_inf = np.empty((self.Nz, self.Nm))
for z in range(self.Nz):
self.c_inf[z, :] = c_func_inf[z].values
def solve_transition_path(self):
# a. allocate memory
self.c_func_transition_path = [None] * self.transT
self.c_transition_path = np.empty((self.transT, self.Nz, self.Nm))
# b. solve backwards along transition path
for t in reversed(range(self.transT)):
# i. solve
if t == self.transT - 1:
c_plus_func = self.c_func_inf
self.c_func_transition_path[t] = self.solve_step(
c_plus_func, self.R_ss, self.w_ss
)
else:
c_plus_func = self.c_func_transition_path[t + 1]
self.c_func_transition_path[t] = self.solve_step(
c_plus_func, self.sim_R[t + 1], self.sim_w[t + 1]
)
# ii. save values
for z in range(self.Nz):
self.c_transition_path[t, z, :] = self.c_func_transition_path[t][
z
].values
#############################
# 4. stationary equilibrium #
#############################
def check_supply_and_demand(self, R_ss_guess, a0, z0, print_results=False):
# a. prices
self.R_ss = R_ss_guess
self.w_ss = self.w_from_R_func(self.R_ss)
# b. solve infinite horizon problem
t0 = time.time()
self.solve_inf_horizon()
time_sol = time.time() - t0
# c. simulate
t0 = time.time()
# prices
self.ss_sim_R = self.R_ss * np.ones(self.ss_simT)
self.ss_sim_w = self.w_ss * np.ones(self.ss_simT)
# simulate
self.ss_sim_k, self.ss_sim_a, self.ss_sim_z = simulate(
a0,
z0,
self.ss_sim_R,
self.ss_sim_w,
self.ss_simN,
self.ss_simT,
self.grid_z,
self.grid_m,
self.c_inf,
self.trans_p_z,
self.unemp_p,
self.unemp_b,
self.c_transition_path,
0,
self.seed,
)
time_sim = time.time() - t0
# d. calculate difference
self.k_ss = np.mean(self.ss_sim_k[self.ss_sim_burnin :])
R_ss_implied = self.R_func(self.k_ss)
diff = R_ss_implied - R_ss_guess
# e. print results
if print_results:
print(
f" guess on R = {R_ss_guess:.5f} -> implied R = {R_ss_implied:.5f} (diff = {diff:8.5f})"
)
# print(f' time to solve = {time_sol:.1f}, time to simulate = {time_sim:.1f}')
return diff
def find_stationary_equilibrium(self, print_results=True):
print(f"find stationary equilibrium (R in [{self.R_low:.5f};{self.R_high:.5f}]")
# a. initial values
        a0 = self.ss_a0 * np.ones(self.ss_simN)
"""
point cloud pose estimation module no. 1
Copyright (C) 2020 Siemens AG
SPDX-License-Identifier: MIT for non-commercial use otherwise see license terms
Author 2020 <NAME>
"""
import os
import numpy as np
import torch
import network_bingham
from utils import qrotate_pc, qconjugate, qmult
from pytorch3d.loss import chamfer_distance
num_point = 2048
batch_size = 32
device = torch.device('cuda:0')
nm = 50
net = network_bingham.MBN(num_point, 3, 128, nm)
net = net.to(device)
def eval_cls(cls):
## change the weight_fn to the expected one
weight_fn = 'log_{}/chkpt.pth'.format(cls)
if not os.path.exists(weight_fn):
print('{} not exists.'.format(weight_fn))
return
print('Initializing network')
state_dict = torch.load(weight_fn)
print('loading weights from {}'.format(weight_fn))
net.load_state_dict(state_dict, strict=False)
net.eval()
print('Network initialization done')
test_data_fn = './data/benchmark/{}.npy'.format(cls)
test_data = np.load(test_data_fn, allow_pickle=True)
cd_lst = []
for idx, (pc, gt_q) in enumerate(test_data):
points = torch.from_numpy(pc).float().to(device).reshape(1, num_point, pc.shape[1])
gt_q = torch.from_numpy(gt_q).float().to(device).reshape(1, 4)
pred_q, pred_l, weights = net(points)
rel_q = qmult(pred_q, qconjugate(gt_q))
rel_q_tiled = rel_q.reshape(nm, 1, 4).repeat(1, pc.shape[0], 1).reshape(-1, 4)
points_tiled = points.reshape(1, pc.shape[0], 3).repeat(nm, 1, 1).reshape(-1, 3)
rotated_pc = qrotate_pc(points_tiled, rel_q_tiled)
rotated_pc = rotated_pc.reshape(nm, pc.shape[0], 3)
dists = chamfer_distance(points_tiled.reshape(nm, pc.shape[0], 3), rotated_pc, batch_reduction=None)[0]
best_dist = dists[weights.argmax()].item()
cd_lst.append(best_dist)
    print('{}: {}'.format(cls, np.mean(cd_lst)))
from __future__ import absolute_import, division, print_function
import numpy as np
from datashape import dshape, isnumeric, Record, Option
from datashape import coretypes as ct
from toolz import concat, unique
import xarray as xr
from datashader.glyphs.glyph import isnull
from .utils import Expr, ngjit
from numba import cuda as nb_cuda
try:
import cudf
except ImportError:
cudf = None
class Preprocess(Expr):
"""Base clase for preprocessing steps."""
def __init__(self, column):
self.column = column
@property
def inputs(self):
return (self.column,)
class extract(Preprocess):
"""Extract a column from a dataframe as a numpy array of values."""
def apply(self, df):
if cudf and isinstance(df, cudf.DataFrame):
import cupy
if df[self.column].dtype.kind == 'f':
nullval = np.nan
else:
nullval = 0
return cupy.array(df[self.column].to_gpu_array(fillna=nullval))
elif isinstance(df, xr.Dataset):
# DataArray could be backed by numpy or cupy array
return df[self.column].data
else:
return df[self.column].values
class category_codes(Preprocess):
"""Extract just the category codes from a categorical column."""
def apply(self, df):
if cudf and isinstance(df, cudf.DataFrame):
return df[self.column].cat.codes.to_gpu_array()
else:
return df[self.column].cat.codes.values
class Reduction(Expr):
"""Base class for per-bin reductions."""
def __init__(self, column=None):
self.column = column
def validate(self, in_dshape):
if not self.column in in_dshape.dict:
raise ValueError("specified column not found")
if not isnumeric(in_dshape.measure[self.column]):
raise ValueError("input must be numeric")
def out_dshape(self, in_dshape):
return self._dshape
@property
def inputs(self):
return (extract(self.column),)
def _build_bases(self, cuda=False):
return (self,)
def _build_temps(self, cuda=False):
return ()
def _build_create(self, dshape):
return self._create
def _build_append(self, dshape, schema, cuda=False):
if cuda:
if self.column is None:
return self._append_no_field_cuda
else:
return self._append_cuda
else:
if self.column is None:
return self._append_no_field
else:
return self._append
def _build_combine(self, dshape):
return self._combine
def _build_finalize(self, dshape):
return self._finalize
class OptionalFieldReduction(Reduction):
"""Base class for things like ``count`` or ``any``"""
def __init__(self, column=None):
self.column = column
@property
def inputs(self):
return (extract(self.column),) if self.column is not None else ()
def validate(self, in_dshape):
pass
@staticmethod
def _finalize(bases, cuda=False, **kwargs):
return xr.DataArray(bases[0], **kwargs)
class count(OptionalFieldReduction):
"""Count elements in each bin.
Parameters
----------
column : str, optional
If provided, only counts elements in ``column`` that are not ``NaN``.
Otherwise, counts every element.
"""
_dshape = dshape(ct.int32)
# CPU append functions
@staticmethod
@ngjit
def _append_no_field(x, y, agg):
agg[y, x] += 1
@staticmethod
@ngjit
def _append(x, y, agg, field):
if not isnull(field):
agg[y, x] += 1
# GPU append functions
@staticmethod
@nb_cuda.jit(device=True)
def _append_no_field_cuda(x, y, agg):
nb_cuda.atomic.add(agg, (y, x), 1)
@staticmethod
@nb_cuda.jit(device=True)
def _append_cuda(x, y, agg, field):
if not isnull(field):
nb_cuda.atomic.add(agg, (y, x), 1)
@staticmethod
def _create(shape, array_module):
return array_module.zeros(shape, dtype='i4')
@staticmethod
def _combine(aggs):
return aggs.sum(axis=0, dtype='i4')
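# A minimal sketch (assuming numpy inputs and the CPU code path) of how the
# count reduction's pieces compose: _create allocates a per-chunk aggregate,
# _append updates one bin per data point (skipping NaNs), and _combine merges
# the per-chunk aggregates into the final count grid.
def _example_count_reduction():
    agg_a = count._create((3, 3), np)
    agg_b = count._create((3, 3), np)
    for x, y, val in [(0, 0, 1.), (1, 2, np.nan), (2, 2, 3.)]:
        count._append(x, y, agg_a, val)
    for x, y, val in [(0, 0, 5.), (1, 1, 2.)]:
        count._append(x, y, agg_b, val)
    return count._combine(np.stack([agg_a, agg_b]))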
class any(OptionalFieldReduction):
"""Whether any elements in ``column`` map to each bin.
Parameters
----------
column : str, optional
If provided, only elements in ``column`` that are ``NaN`` are skipped.
"""
_dshape = dshape(ct.bool_)
@staticmethod
@ngjit
def _append_no_field(x, y, agg):
agg[y, x] = True
_append_no_field_cuda = _append_no_field
@staticmethod
@ngjit
def _append(x, y, agg, field):
if not isnull(field):
agg[y, x] = True
_append_cuda =_append
@staticmethod
def _create(shape, array_module):
return array_module.zeros(shape, dtype='bool')
@staticmethod
def _combine(aggs):
return aggs.sum(axis=0, dtype='bool')
class FloatingReduction(Reduction):
"""Base classes for reductions that always have floating-point dtype."""
_dshape = dshape(Option(ct.float64))
@staticmethod
def _create(shape, array_module):
return array_module.full(shape, np.nan, dtype='f8')
@staticmethod
def _finalize(bases, cuda=False, **kwargs):
return xr.DataArray(bases[0], **kwargs)
class _sum_zero(FloatingReduction):
"""Sum of all elements in ``column``.
Parameters
----------
column : str
Name of the column to aggregate over. Column data type must be numeric.
``NaN`` values in the column are skipped.
"""
@staticmethod
def _create(shape, array_module):
return array_module.full(shape, 0.0, dtype='f8')
@staticmethod
@ngjit
def _append(x, y, agg, field):
if not isnull(field):
agg[y, x] += field
@staticmethod
@ngjit
def _append_cuda(x, y, agg, field):
if not isnull(field):
nb_cuda.atomic.add(agg, (y, x), field)
@staticmethod
def _combine(aggs):
return aggs.sum(axis=0, dtype='f8')
class sum(FloatingReduction):
"""Sum of all elements in ``column``.
Elements of resulting aggregate are nan if they are not updated.
Parameters
----------
column : str
Name of the column to aggregate over. Column data type must be numeric.
``NaN`` values in the column are skipped.
"""
_dshape = dshape(Option(ct.float64))
# Cuda implementation
def _build_bases(self, cuda=False):
if cuda:
return (_sum_zero(self.column), any(self.column))
else:
return (self,)
@staticmethod
def _finalize(bases, cuda=False, **kwargs):
if cuda:
sums, anys = bases
x = np.where(anys, sums, np.nan)
return xr.DataArray(x, **kwargs)
else:
return xr.DataArray(bases[0], **kwargs)
# Single pass CPU implementation
# These methods will only be called if _build_bases returned (self,)
@staticmethod
@ngjit
def _append(x, y, agg, field):
if not isnull(field):
if isnull(agg[y, x]):
agg[y, x] = field
else:
agg[y, x] += field
@staticmethod
def _combine(aggs):
missing_vals = np.isnan(aggs)
all_empty = np.bitwise_and.reduce(missing_vals, axis=0)
set_to_zero = missing_vals & ~all_empty
return np.where(set_to_zero, 0, aggs).sum(axis=0)
class m2(FloatingReduction):
"""Sum of square differences from the mean of all elements in ``column``.
Intermediate value for computing ``var`` and ``std``, not intended to be
used on its own.
Parameters
----------
column : str
Name of the column to aggregate over. Column data type must be numeric.
``NaN`` values in the column are skipped.
"""
@staticmethod
def _create(shape, array_module):
return array_module.full(shape, 0.0, dtype='f8')
def _build_temps(self, cuda=False):
return (_sum_zero(self.column), count(self.column))
def _build_append(self, dshape, schema, cuda=False):
if cuda:
raise ValueError("""\
The 'std' and 'var' reduction operations are not yet supported on the GPU""")
return super(m2, self)._build_append(dshape, schema, cuda)
@staticmethod
@ngjit
def _append(x, y, m2, field, sum, count):
# sum & count are the results of sum[y, x], count[y, x] before being
# updated by field
if not isnull(field):
if count > 0:
u1 = np.float64(sum) / count
u = np.float64(sum + field) / (count + 1)
m2[y, x] += (field - u1) * (field - u)
@staticmethod
def _combine(Ms, sums, ns):
        with np.errstate(divide='ignore', invalid='ignore'):
            # Combine per-chunk M2 values using the parallel variance formula.
            mu = np.nansum(sums, axis=0) / ns.sum(axis=0)
            return np.nansum(Ms + ns * (sums / ns - mu)**2, axis=0)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 14 14:26:14 2019
@author: ranahamzaintisar
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy.linalg import svd
from sklearn.metrics import mean_squared_error as mse
from sklearn.metrics import accuracy_score as acc
import random
from sklearn.utils import shuffle
'''functions built'''
## Function to split into training and test dataset and create target vector z:
def training_test_split(dataset):
split_data = np.array_split(dataset,10)
training =[]
test = []
for i in range(len(split_data)):
np.random.shuffle(split_data[i])
train_test_split = np.array_split(split_data[i],2)
for item in train_test_split[0]:
if i == 0:
new = np.append(item,10) #class label 10 for digit 0
training.append(new)
else:
new = np.append(item,i) # class labels for other digits
training.append(new)
for item in train_test_split[1]:
if i == 0:
new = np.append(item,10)
test.append(new)
else:
new = np.append(item,i)
test.append(new)
# Training dataset with target vector Z
training_dataset = pd.DataFrame(training)
training_dataset[240] = training_dataset[240].astype('category') # make class label as category
##create dummy variables for the categorical variable i.e target vectors
training_dataset = pd.get_dummies(training_dataset, dummy_na=True, prefix_sep='_' )
## drop nan dummy columns if created
training_dataset = training_dataset.loc[:, training_dataset.nunique(axis=0) > 1]
# Test dataset with target vector Z
test_dataset = pd.DataFrame(test)
test_dataset[240] = test_dataset[240].astype('category') # make class label as category
##create dummy variables for the categorical variable i.e target vectors
test_dataset = pd.get_dummies(test_dataset, dummy_na=True, prefix_sep='_' )
## drop nan dummy columns if created
test_dataset = test_dataset.loc[:, test_dataset.nunique(axis=0) > 1]
return training_dataset , test_dataset
## function to seperate feature vectors from binary target vectors
def split_features_labels(data):
label_col = [x for x in data.columns if isinstance(x, str)]
return (data.drop(label_col, axis=1),
data[label_col])
def split_features_labels_cv(data):
label_col = [x for x in data.columns if x>239]
return (data.drop(label_col, axis=1),
data[label_col])
## function to center the data
def center(df):
cols = df.columns
for field in cols:
mean_field = df[field].mean()
# account for constant columns
if np.all(df[field] - mean_field != 0):
df.loc[:, field] = (df[field] - mean_field)
return df
## Function to find the correlation matrix of the centered data points:
def coor_c(df):
df_matrix = df.as_matrix()
df_matrix_transpose = df_matrix.transpose()
coor_matrix = np.dot(df_matrix_transpose,df_matrix)
n = coor_matrix.shape[1]
normal_coor_matrix = np.multiply(coor_matrix,1/n)
return normal_coor_matrix
## Function computing the eigenvalues and right eigenvectors of the correlation matrix
## and returning them in descending order
def eigen(coor_matrix):
#compute the eigen vector and values
    eig_val_cov, eig_vec_cov = np.linalg.eig(coor_matrix)
## sort eigen vector and eigen values from high to low
# Make a list of (eigenvalue, eigenvector) tuples
eig_pairs = [(np.abs(eig_val_cov[i]), eig_vec_cov[:,i]) for i in range(len(eig_val_cov))]
# Sort the (eigenvalue, eigenvector) tuples from high to low
eig_pairs.sort(key=lambda x: x[0], reverse=True)
#seperate the sorted pair
eigen_val_decending =[]
for i in eig_pairs:
eigen_val_decending.append(i[0])
eigen_vec_decending = []
for i in eig_pairs:
eigen_vec_decending.append(i[1])
return eigen_val_decending,eigen_vec_decending
## function to reaturn number of desiered PC features and padded with Bias
def pc_features(eigen_vec,eigen_val,centered_data,num_pc):
s_pc = num_pc
pc_vectors = np.stack(eigen_vec[0:s_pc],axis=0)
pc_eigen_val = np.stack(eigen_val[0:s_pc],axis=0)
pc_features = np.dot(pc_vectors,centered_data.as_matrix().transpose()).transpose()
#add bias to the features:
feat_df= pd.DataFrame(pc_features)
bias = np.full(pc_features.shape[0],1)
feat_df['bias']=bias
features = feat_df.as_matrix()
return features,pc_eigen_val
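## A minimal sketch (hypothetical random data, assuming the same older pandas
## API with .as_matrix used above) of the PCA feature pipeline: center the
## data, build the correlation matrix, take its eigendecomposition and project
## onto the leading principal components, padded with a bias column.
def example_pc_pipeline(num_pc=5):
    demo = pd.DataFrame(np.random.randn(50, 240))
    centered = center(demo)
    corr = coor_c(centered)
    eig_val, eig_vec = eigen(corr)
    feats, pc_eig_val = pc_features(eig_vec, eig_val, centered, num_pc)
    return feats.shape  # (50, num_pc + 1), the extra column is the bias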
## Ridge regression function using formula 39 ML notes
def ridge_reg(features,target,a):
##computing the SVD
semi_def_matrix = np.dot(features,features.transpose())
target_matrix = target.as_matrix()
num_data=semi_def_matrix.shape[0]
identity_matrix = np.identity(num_data)
alpha = a
alpha_sq= alpha**2
r_mat = alpha_sq*identity_matrix
ridge_matrix = semi_def_matrix+r_mat
ridge_matrix_inv = np.linalg.inv(ridge_matrix)
wopt_inv= np.matmul(np.matmul(ridge_matrix_inv,features).transpose(),target_matrix)
wopt = wopt_inv.transpose()
## use the wopt to find hypothesis vectors
hypothesis_matrix = np.matmul(wopt,features.transpose()).transpose()
## use hypothesis vectors to find prediction
prediction = []
for row in hypothesis_matrix:
pred = np.zeros_like(row,dtype='int')
index = np.argmax(row)
pred[index]=1
prediction.append(pred)
df_pred = pd.DataFrame(prediction)
pred_matrix = df_pred.as_matrix()
return pred_matrix , target_matrix
def misclass_rate(pred,actual):
return 1-((sum(np.array([np.argmax(a) for a in pred])==np.array([np.argmax(a) for a in actual]))).astype("float")/len(actual))
def meansq_error(pred,actual):
return np.mean((pred - actual)**2)
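## A minimal sketch (hypothetical one-hot targets, same older pandas
## assumption as above) of fitting the ridge classifier and evaluating it with
## the two metrics defined above.
def example_ridge(alpha=1.0):
    feats = np.random.randn(60, 21)          # e.g. 20 PC features plus a bias
    labels = np.random.randint(0, 10, 60)
    targets = pd.get_dummies(labels)         # one-hot target vectors
    pred, actual = ridge_reg(feats, targets, alpha)
    return meansq_error(pred, actual), misclass_rate(pred, actual)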
##cross validation with alpha
def cv_ridge(dataset,no_fold,tune_grid,numdim):
#take the training dataframe with the target vectors
cv_df = dataset.copy()
# make k fold splits
    a = tune_grid
mse_tr_a = []
m_tr_a=[]
mse_val_a = []
m_val_a =[]
for alpha in a:
        k = no_fold
num_dig = int(cv_df.shape[0])
size = int(num_dig/k)
mse_tr =[]
m_tr=[]
mse_val = []
m_val = []
for i in range (k):
cv_new = shuffle(cv_df.values)
test_indices = [x for x in range(i*size,size+(i*size))]
train_indices = range(0,num_dig)
#remove the test indices from the train set
train_indices = [x for x in train_indices if x not in test_indices]
train_cv = pd.DataFrame(cv_new[train_indices])
test_cv = pd.DataFrame(cv_new[test_indices])
##fit the model on training data
#split into intitial fetures and target vectors
feature_train,target_train = split_features_labels_cv(train_cv)
feature_val,target_val= split_features_labels_cv(test_cv)
#center the feature vectors for PCA
centered_train = center(feature_train)
centered_test = center(feature_val)
#find the coorelation matrix (240,240) matrix size
coorelation_matrix_train = coor_c(centered_train)
# Find the eigenvectors and eigen values of the coorelation matrix
eig_val,eig_vec = eigen(coorelation_matrix_train)
# number of PCA features selected=20
# compute the projections of original image vectors in the selected PC directions
feat,pc_eigen_val = pc_features(eig_vec,eig_val,centered_train,numdim)
feat_val,pc_eig_v = pc_features(eig_vec,eig_val,centered_test,numdim)
## run the ridge regression on and compute MSEtrain and MISStrain
# ridge regression
reg_pred_train, reg_target_train = ridge_reg(feat,target_train,alpha)
#MSE
mse_train = meansq_error(reg_pred_train,reg_target_train)
mse_tr.append(mse_train)
#MISS
miss_train = misclass_rate(reg_pred_train,reg_target_train)
m_tr.append(miss_train)
#Predict for validation set
#fit the ridge reg model
reg_pred_val, reg_target_val = ridge_reg(feat_val,target_val,alpha)
#MSE
ms_val = meansq_error(reg_pred_val,reg_target_val)
mse_val.append(ms_val)
#MISS
miss_val = misclass_rate(reg_pred_val,reg_target_val)
m_val.append(miss_val)
mse_tr_a.append(np.mean(mse_tr))
m_tr_a.append(np.mean(m_tr))
mse_val_a.append(np.mean(mse_val))
m_val_a.append(np.mean(m_val))
return mse_tr_a,m_tr_a,mse_val_a,m_val_a
def cv_ridge_kmeans(dataset,no_fold,tune_grid,cnum):
cv_df = dataset.copy()
# make k fold splits
a = tune_grid
mse_tr_a = []
m_tr_a=[]
mse_val_a = []
m_val_a =[]
for alpha in a:
k = no_fold
num_dig = int(cv_df.shape[0])
size = int(num_dig/k)
mse_tr =[]
m_tr=[]
mse_val = []
m_val = []
for i in range (k):
cv_new = shuffle(cv_df.values)
test_indices = [x for x in range(i*size,size+(i*size))]
train_indices = range(0,num_dig)
#remove the test indices from the train set
train_indices = [x for x in train_indices if x not in test_indices]
train_cv = pd.DataFrame(cv_new[train_indices])
test_cv = pd.DataFrame(cv_new[test_indices])
##fit the model on training data
            #split into initial features and target vectors
feature_train,target_train = split_features_labels_cv(train_cv)
feature_val,target_val= split_features_labels_cv(test_cv)
# use the Kmeans for feature selection
new_feat = kmeans_algorithm(feature_train.as_matrix(),cnum)
new_feat_v = kmeans_algorithm(feature_val.as_matrix(),cnum)
## run the ridge regression on and compute MSEtrain and MISStrain
# ridge regression
reg_pred_train, reg_target_train = ridge_reg(new_feat,target_train,alpha)
#MSE
mse_train = meansq_error(reg_pred_train,reg_target_train)
mse_tr.append(mse_train)
#MISS
miss_train = misclass_rate(reg_pred_train,reg_target_train)
m_tr.append(miss_train)
#Predict for validation set
#fit the ridge reg model
reg_pred_val, reg_target_val = ridge_reg(new_feat_v,target_val,alpha)
#MSE
ms_val = meansq_error(reg_pred_val,reg_target_val)
mse_val.append(ms_val)
#MISS
miss_val = misclass_rate(reg_pred_val,reg_target_val)
m_val.append(miss_val)
mse_tr_a.append(np.mean(mse_tr))
m_tr_a.append(np.mean(m_tr))
mse_val_a.append(np.mean(mse_val))
m_val_a.append(np.mean(m_val))
return mse_tr_a,m_tr_a,mse_val_a,m_val_a
### crossvalidation with feature number(PCA features)
def cv_features(dataset,no_fold,tune_grid,alpha):
cv_df = dataset.copy()
a = tune_grid
mse_tr_a = []
m_tr_a=[]
mse_val_a = []
m_val_a =[]
for dimnum in a:
k = no_fold
num_dig = int(cv_df.shape[0])
size = int(num_dig/k)
mse_tr =[]
m_tr=[]
mse_val = []
m_val = []
for i in range (k):
cv_new = shuffle(cv_df.values)
test_indices = [x for x in range(i*size,size+(i*size))]
train_indices = range(0,num_dig)
#remove the test indices from the train set
train_indices = [x for x in train_indices if x not in test_indices]
train_cv = pd.DataFrame(cv_new[train_indices])
test_cv = pd.DataFrame(cv_new[test_indices])
##fit the model on training data
            #split into initial features and target vectors
feature_train,target_train = split_features_labels_cv(train_cv)
feature_val,target_val= split_features_labels_cv(test_cv)
#center the feature vectors for PCA
centered_train = center(feature_train)
centered_test = center(feature_val)
            #find the correlation matrix (240,240) matrix size
            coorelation_matrix_train = coor_c(centered_train)
            # Find the eigenvectors and eigenvalues of the correlation matrix
eig_val,eig_vec = eigen(coorelation_matrix_train)
            # number of PCA features selected = dimnum
# compute the projections of original image vectors in the selected PC directions
feat,pc_eigen_val = pc_features(eig_vec,eig_val,centered_train,dimnum)
feat_val,pc_eig_v = pc_features(eig_vec,eig_val,centered_test,dimnum)
## run the ridge regression on and compute MSEtrain and MISStrain
# ridge regression
reg_pred_train, reg_target_train = ridge_reg(feat,target_train,alpha)
#MSE
mse_train = meansq_error(reg_pred_train,reg_target_train)
mse_tr.append(mse_train)
#MISS
miss_train = misclass_rate(reg_pred_train,reg_target_train)
m_tr.append(miss_train)
#Predict for validation set
#fit the ridge reg model
reg_pred_val, reg_target_val = ridge_reg(feat_val,target_val,alpha)
#MSE
ms_val = meansq_error(reg_pred_val,reg_target_val)
mse_val.append(ms_val)
#MISS
miss_val = misclass_rate(reg_pred_val,reg_target_val)
m_val.append(miss_val)
mse_tr_a.append(np.mean(mse_tr))
m_tr_a.append(np.mean(m_tr))
mse_val_a.append(np.mean(mse_val))
m_val_a.append(np.mean(m_val))
return mse_tr_a,m_tr_a,mse_val_a,m_val_a
### exploring with K-means feature
def kmeans_algorithm (dataframe, n):
    # create a copy of the 2-d image vector array
    data_copy = dataframe.copy()
    # shuffle the 2-d image vector arrays along the first axis (rows)
    np.random.shuffle(data_copy)
    # take the first n image vectors from the shuffled array as the initial random codebook vectors
    codebook = data_copy[:n]
    # Compute the distance between the image vectors and the randomly selected codebook vectors:
    # subtract each codebook vector from the image vectors.
    # numpy broadcasting allows subtracting all image vectors from the codebook array even though their shapes don't match.
    # Step 1: extend the codebook array by adding a new axis between the two existing dimensions;
    # broadcasting makes arrays of unequal dimension compatible when one of the dimensions is 1, here the extended axis.
    extend_codebook = codebook[:,np.newaxis]
    # Step 2: subtract the extended codebook array from the image vector array
    difference = dataframe - extend_codebook
    # element-wise absolute differences of the image vectors from each codebook vector
    abs_dist_extended = np.sqrt((difference)**2)
    # sum over the pixel axis to reduce the 3-d array of absolute differences to a 2-d distance matrix
    abs_dist = abs_dist_extended.sum(axis=2)
    # for each image vector, the index of the nearest codebook vector
    nearest_codebook = np.argmin(abs_dist,axis=0)
    # assign new codebook vectors as the mean of the image vectors that lie closest to each codebook vector
    new_codebook = np.array([dataframe[nearest_codebook==i].mean(axis=0) for i in range(codebook.shape[0])])
    # distances of the points from the new codebook vectors are taken as features
extend_new_codebook = new_codebook[:,np.newaxis]
diff_new_codebook = dataframe - extend_new_codebook
abs_new_codebook = np.sqrt((diff_new_codebook)**2)
abs_new = abs_new_codebook.sum(axis=2).T
feat_df= pd.DataFrame(abs_new)
bias = np.full(abs_new.shape[0],1)
feat_df['bias']=bias
features = feat_df.as_matrix()
return features
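# Minimal broadcasting sketch (illustrative only; shapes are assumptions):
# adding a new axis to the codebook turns (n_codebooks, n_pixels) into
# (n_codebooks, 1, n_pixels), so subtracting it from data of shape
# (n_samples, n_pixels) broadcasts to (n_codebooks, n_samples, n_pixels).
#   data     = np.random.rand(5, 3)                # 5 samples, 3 features
#   codebook = data[:2]                            # 2 codebook vectors
#   diff     = data - codebook[:, np.newaxis]      # shape (2, 5, 3)
#   dist     = np.abs(diff).sum(axis=2)            # shape (2, 5)
#   nearest  = np.argmin(dist, axis=0)             # nearest codebook per sample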
def cv_features_kmeans(dataset,no_fold,tune_grid,alpha):
cv_df = dataset.copy()
# make k fold splits
a = tune_grid
mse_tr_a = []
m_tr_a=[]
mse_val_a = []
m_val_a =[]
for cnum in a:
k = no_fold
num_dig = int(cv_df.shape[0])
size = int(num_dig/k)
mse_tr =[]
m_tr=[]
mse_val = []
m_val = []
for i in range (k):
cv_new = shuffle(cv_df.values)
test_indices = [x for x in range(i*size,size+(i*size))]
train_indices = range(0,num_dig)
#remove the test indices from the train set
train_indices = [x for x in train_indices if x not in test_indices]
train_cv = pd.DataFrame(cv_new[train_indices])
test_cv = pd.DataFrame(cv_new[test_indices])
##fit the model on training data
            #split into initial features and target vectors
feature_train,target_train = split_features_labels_cv(train_cv)
feature_val,target_val= split_features_labels_cv(test_cv)
# use the Kmeans for feature selection
new_feat = kmeans_algorithm(feature_train.as_matrix(),cnum)
new_feat_v = kmeans_algorithm(feature_val.as_matrix(),cnum)
## run the ridge regression on and compute MSEtrain and MISStrain
# ridge regression
reg_pred_train, reg_target_train = ridge_reg(new_feat,target_train,alpha)
#MSE
mse_train = meansq_error(reg_pred_train,reg_target_train)
mse_tr.append(mse_train)
#MISS
miss_train = misclass_rate(reg_pred_train,reg_target_train)
m_tr.append(miss_train)
#Predict for validation set
#fit the ridge reg model
reg_pred_val, reg_target_val = ridge_reg(new_feat_v,target_val,alpha)
#MSE
ms_val = meansq_error(reg_pred_val,reg_target_val)
mse_val.append(ms_val)
#MISS
miss_val = misclass_rate(reg_pred_val,reg_target_val)
m_val.append(miss_val)
mse_tr_a.append(np.mean(mse_tr))
m_tr_a.append(np.mean(m_tr))
mse_val_a.append(np.mean(mse_val))
m_val_a.append(np.mean(m_val))
return mse_tr_a,m_tr_a,mse_val_a,m_val_a
''' Function calls and regression models'''
### Data preparation:
random.seed(1)
filepath = "mfeat-pix.txt"
dataset = np.loadtxt(filepath)
data_copy = dataset.copy()
training, test = training_test_split(data_copy)
feature_train,target_train = split_features_labels(training)
feature_test,target_test =split_features_labels(test)
### PCA features and linear regression
#center the feature vectors for PCA
centered_train = center(feature_train)
centered_test = center(feature_test)
#find the correlation matrix (240,240) matrix size
coorelation_matrix_train = coor_c(centered_train)
# Find the eigenvectors and eigenvalues of the correlation matrix
eig_val,eig_vec = eigen(coorelation_matrix_train)
# number of PCA features selected=240
# compute the projections of original image vectors in the selected PC directions
feat,pc_eigen_val = pc_features(eig_vec,eig_val,centered_train,240)
feat_val,pc_eig_v = pc_features(eig_vec,eig_val,centered_test,240)
## run the regression on and compute MSEtrain and MISStrain
# ridge regression
reg_pred_train, reg_target_train = ridge_reg(feat,target_train,0)
#MSE
mse_train = meansq_error(reg_pred_train,reg_target_train)
#MISS
miss_train = misclass_rate(reg_pred_train,reg_target_train)
#Predict for validation set
#fit the reg model
reg_pred_val, reg_target_val = ridge_reg(feat_val,target_test,0)
#MSE
ms_val = meansq_error(reg_pred_val,reg_target_val)
#MISS
miss_val = misclass_rate(reg_pred_val,reg_target_val)
print('mse_train:',mse_train)
print('miss_train:',miss_train)
print('mse_test:',ms_val)
print('miss_test:',miss_val)
#explore the eigenvalues graphically to see the eigenvalue spectrum against the number of PCs
plt.plot(eig_val)
plt.plot([50, 50], [-100, 800], '--', lw=1 ,color ='r')
plt.xlabel('Principal Component dimensions')
plt.ylabel('eigenvalue')
plt.title('Eigenvalue spectrum of PC dimensions')
#explore the PC below 50
plt.plot(eig_val[0:50])
plt.plot([20, 20], [-100, 800], '--', lw=1 ,color ='r')
plt.xlabel('Principal Component dimensions')
plt.ylabel('eigenvalue')
plt.title('Eigenvalue spectrum of PC dimensions')
# linear regression with 20 PCA features
# number of PCA features selected=20
# compute the projections of original image vectors in the selected PC directions
feat,pc_eigen_val = pc_features(eig_vec,eig_val,centered_train,20)
feat_val,pc_eig_v = pc_features(eig_vec,eig_val,centered_test,20)
## run the regression on and compute MSEtrain and MISStrain
# ridge regression
reg_pred_train, reg_target_train = ridge_reg(feat,target_train,0)
#MSE
mse_train = meansq_error(reg_pred_train,reg_target_train)
#MISS
miss_train = misclass_rate(reg_pred_train,reg_target_train)
#Predict for validation set
#fit the reg model
reg_pred_val, reg_target_val = ridge_reg(feat_val,target_test,0)
#MSE
ms_val = meansq_error(reg_pred_val,reg_target_val)
#MISS
miss_val = misclass_rate(reg_pred_val,reg_target_val)
print('mse_train:',mse_train)
print('miss_train:',miss_train)
print('mse_test:',ms_val)
print('miss_test:',miss_val)
#cv for optimal number of PCA features
mse_tr_a,m_tr_a,mse_val_a,m_val_a = cv_features(training,5,[x for x in range(10,240,20)],0)
#plotting the CV evaluation metrics
a = [x for x in range(10,240,20)]
i_mse = np.argmin(mse_val_a)
i_miss = np.argmin(m_val_a)
l1,=plt.plot(a,m_tr_a,color='blue',label="l1")
l2,=plt.plot(a,m_val_a,color='red',label="l2")
l3,=plt.plot(a,mse_tr_a,"--",color='blue',label="l3")
l4,=plt.plot(a,mse_val_a,"--",color = 'red',label="l4")
plt.plot(a[i_mse],mse_tr_a[i_mse],'o',color='k')
plt.plot(a[i_miss],m_tr_a[i_miss],'x',color='k')
plt.xlabel('number of features')
plt.ylabel('error rates')
plt.title('Train and Validate error rates for number of features')
plt.legend([l1,l2,l3,l4], ["MISS(train)", "MISS(validate)", "MSE(train)", "MSE(validate)"])
plt.show()
#cv for optimal alpha value
mse_tr_a,m_tr_a,mse_val_a,m_val_a = cv_ridge(training,5,np.arange(0.0, 1.0, 0.1) ,90)
#plotting the CV evaluation metrics
a = np.arange(0.0, 1.0, 0.1)
i_mse = np.argmin(mse_tr_a)
i_miss = np.argmin(m_tr_a)
l1,=plt.plot(a,m_tr_a,color='blue',label="l1")
l2,=plt.plot(a,m_val_a,color='red',label="l2")
l3,=plt.plot(a,mse_tr_a,"--",color='blue',label="l3")
l4,=plt.plot(a,mse_val_a,"--",color = 'red',label="l4")
plt.plot(a[i_mse],mse_tr_a[i_mse],'o',color='k')
plt.plot(a[i_miss],m_tr_a[i_miss],'x',color='k')
plt.xlabel('ridge regression tuning parameter(alpha)')
plt.ylabel('error rates')
plt.title('Train and Validate error rates for choice of alpha')
plt.legend([l1,l2,l3,l4], ["MISS(train)", "MISS(validate)", "MSE(train)", "MSE(validate)"])
plt.show()
#model with optimal features and alpha
feat,pc_eigen_val = pc_features(eig_vec,eig_val,centered_train,90)
feat_val,pc_eig_v = pc_features(eig_vec,eig_val,centered_test,90)
## run the regression on and compute MSEtrain and MISStrain
# ridge regression
reg_pred_train, reg_target_train = ridge_reg(feat,target_train,0.3)
#MSE
mse_train = meansq_error(reg_pred_train,reg_target_train)
#MISS
miss_train = misclass_rate(reg_pred_train,reg_target_train)
#Predict for validation set
#fit the reg model
reg_pred_val, reg_target_val = ridge_reg(feat_val,target_test,0.3)
#MSE
ms_val = meansq_error(reg_pred_val,reg_target_val)
#MISS
miss_val = misclass_rate(reg_pred_val,reg_target_val)
print('mse_train:',mse_train)
print('miss_train:',miss_train)
print('mse_test:',ms_val)
print('miss_test:',miss_val)
## linear regression with k means feature
# linear regression with k=100 centroids
new_feat = kmeans_algorithm(feature_train.as_matrix(),100)
new_feat_v = kmeans_algorithm(feature_test.as_matrix(),100)
## run the ridge regression on and compute MSEtrain and MISStrain
# ridge regression
reg_pred_train, reg_target_train = ridge_reg(new_feat,target_train,0)
#MSE
mse_train = meansq_error(reg_pred_train,reg_target_train)
#MISS
miss_train = misclass_rate(reg_pred_train,reg_target_train)
#Predict for test set
#fit the ridge reg model
reg_pred_val, reg_target_val = ridge_reg(new_feat_v,target_test,0)
#MSE
ms_val = meansq_error(reg_pred_val,reg_target_val)
#MISS
miss_val = misclass_rate(reg_pred_val,reg_target_val)
print('mse_train:',mse_train)
print('miss_train:',miss_train)
print('mse_test:',ms_val)
print('miss_test:',miss_val)
#cross validation for optimal number of centroids
mse_tr_a,m_tr_a,mse_val_a,m_val_a = cv_features_kmeans(training,5,[x for x in range(50,1000,50)],0)
#plotting the CV evaluation metrics
a = [x for x in range(50,1000,50)]
i_mse = np.argmin(mse_tr_a)
i_miss = np.argmin(m_tr_a)
from __future__ import print_function, division, absolute_import
import copy as copylib
import sys
# unittest only added in 3.4 self.subTest()
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import imgaug as ia
import imgaug.augmenters as iaa
from imgaug.testutils import reseed
import imgaug.random as iarandom
NP_VERSION = np.__version__
IS_NP_117_OR_HIGHER = (
NP_VERSION.startswith("2.")
or NP_VERSION.startswith("1.25")
or NP_VERSION.startswith("1.24")
or NP_VERSION.startswith("1.23")
or NP_VERSION.startswith("1.22")
or NP_VERSION.startswith("1.21")
or NP_VERSION.startswith("1.20")
or NP_VERSION.startswith("1.19")
or NP_VERSION.startswith("1.18")
or NP_VERSION.startswith("1.17")
)
class _Base(unittest.TestCase):
def setUp(self):
reseed()
class TestConstants(_Base):
def test_supports_new_np_rng_style_is_true(self):
assert iarandom.SUPPORTS_NEW_NP_RNG_STYLE is IS_NP_117_OR_HIGHER
def test_global_rng(self):
iarandom.get_global_rng() # creates global RNG upon first call
assert iarandom.GLOBAL_RNG is not None
class TestRNG(_Base):
@mock.patch("imgaug.random.normalize_generator_")
def test___init___calls_normalize_mocked(self, mock_norm):
_ = iarandom.RNG(0)
mock_norm.assert_called_once_with(0)
def test___init___with_rng(self):
rng1 = iarandom.RNG(0)
rng2 = iarandom.RNG(rng1)
assert rng2.generator is rng1.generator
@mock.patch("imgaug.random.get_generator_state")
def test_state_getter_mocked(self, mock_get):
mock_get.return_value = "mock"
rng = iarandom.RNG(0)
result = rng.state
assert result == "mock"
mock_get.assert_called_once_with(rng.generator)
@mock.patch("imgaug.random.RNG.set_state_")
def test_state_setter_mocked(self, mock_set):
rng = iarandom.RNG(0)
state = {"foo"}
rng.state = state
mock_set.assert_called_once_with(state)
@mock.patch("imgaug.random.set_generator_state_")
def test_set_state__mocked(self, mock_set):
rng = iarandom.RNG(0)
state = {"foo"}
result = rng.set_state_(state)
assert result is rng
mock_set.assert_called_once_with(rng.generator, state)
@mock.patch("imgaug.random.set_generator_state_")
def test_use_state_of__mocked(self, mock_set):
rng1 = iarandom.RNG(0)
rng2 = mock.MagicMock()
state = {"foo"}
rng2.state = state
result = rng1.use_state_of_(rng2)
assert result == rng1
mock_set.assert_called_once_with(rng1.generator, state)
@mock.patch("imgaug.random.get_global_rng")
def test_is_global__is_global__rng_mocked(self, mock_get):
rng1 = iarandom.RNG(0)
rng2 = iarandom.RNG(rng1.generator)
mock_get.return_value = rng2
assert rng1.is_global_rng() is True
@mock.patch("imgaug.random.get_global_rng")
def test_is_global_rng__is_not_global__mocked(self, mock_get):
rng1 = iarandom.RNG(0)
# different instance with same state/seed should still be viewed as
# different by the method
rng2 = iarandom.RNG(0)
mock_get.return_value = rng2
assert rng1.is_global_rng() is False
@mock.patch("imgaug.random.get_global_rng")
def test_equals_global_rng__is_global__mocked(self, mock_get):
rng1 = iarandom.RNG(0)
rng2 = iarandom.RNG(0)
mock_get.return_value = rng2
assert rng1.equals_global_rng() is True
@mock.patch("imgaug.random.get_global_rng")
def test_equals_global_rng__is_not_global__mocked(self, mock_get):
rng1 = iarandom.RNG(0)
rng2 = iarandom.RNG(1)
mock_get.return_value = rng2
assert rng1.equals_global_rng() is False
@mock.patch("imgaug.random.generate_seed_")
def test_generate_seed__mocked(self, mock_gen):
rng = iarandom.RNG(0)
mock_gen.return_value = -1
seed = rng.generate_seed_()
assert seed == -1
mock_gen.assert_called_once_with(rng.generator)
@mock.patch("imgaug.random.generate_seeds_")
def test_generate_seeds__mocked(self, mock_gen):
rng = iarandom.RNG(0)
mock_gen.return_value = [-1, -2]
seeds = rng.generate_seeds_(2)
assert seeds == [-1, -2]
mock_gen.assert_called_once_with(rng.generator, 2)
@mock.patch("imgaug.random.reset_generator_cache_")
def test_reset_cache__mocked(self, mock_reset):
rng = iarandom.RNG(0)
result = rng.reset_cache_()
assert result is rng
mock_reset.assert_called_once_with(rng.generator)
@mock.patch("imgaug.random.derive_generators_")
def test_derive_rng__mocked(self, mock_derive):
gen = iarandom.convert_seed_to_generator(0)
mock_derive.return_value = [gen]
rng = iarandom.RNG(0)
result = rng.derive_rng_()
assert result.generator is gen
mock_derive.assert_called_once_with(rng.generator, 1)
@mock.patch("imgaug.random.derive_generators_")
def test_derive_rngs__mocked(self, mock_derive):
gen1 = iarandom.convert_seed_to_generator(0)
gen2 = iarandom.convert_seed_to_generator(1)
mock_derive.return_value = [gen1, gen2]
rng = iarandom.RNG(0)
result = rng.derive_rngs_(2)
assert result[0].generator is gen1
assert result[1].generator is gen2
mock_derive.assert_called_once_with(rng.generator, 2)
@mock.patch("imgaug.random.is_generator_equal_to")
def test_equals_mocked(self, mock_equal):
mock_equal.return_value = "foo"
rng1 = iarandom.RNG(0)
rng2 = iarandom.RNG(1)
result = rng1.equals(rng2)
assert result == "foo"
mock_equal.assert_called_once_with(rng1.generator, rng2.generator)
def test_equals_identical_generators(self):
rng1 = iarandom.RNG(0)
rng2 = iarandom.RNG(rng1)
assert rng1.equals(rng2)
def test_equals_with_similar_generators(self):
rng1 = iarandom.RNG(0)
rng2 = iarandom.RNG(0)
assert rng1.equals(rng2)
def test_equals_with_different_generators(self):
rng1 = iarandom.RNG(0)
rng2 = iarandom.RNG(1)
assert not rng1.equals(rng2)
def test_equals_with_advanced_generator(self):
rng1 = iarandom.RNG(0)
rng2 = iarandom.RNG(0)
rng2.advance_()
assert not rng1.equals(rng2)
@mock.patch("imgaug.random.advance_generator_")
def test_advance__mocked(self, mock_advance):
rng = iarandom.RNG(0)
result = rng.advance_()
assert result is rng
mock_advance.assert_called_once_with(rng.generator)
@mock.patch("imgaug.random.copy_generator")
def test_copy_mocked(self, mock_copy):
rng1 = iarandom.RNG(0)
rng2 = iarandom.RNG(1)
mock_copy.return_value = rng2.generator
result = rng1.copy()
assert result.generator is rng2.generator
mock_copy.assert_called_once_with(rng1.generator)
@mock.patch("imgaug.random.RNG.copy")
@mock.patch("imgaug.random.RNG.is_global_rng")
def test_copy_unless_global_rng__is_global__mocked(self, mock_is_global,
mock_copy):
rng = iarandom.RNG(0)
mock_is_global.return_value = True
mock_copy.return_value = "foo"
result = rng.copy_unless_global_rng()
assert result is rng
mock_is_global.assert_called_once_with()
assert mock_copy.call_count == 0
@mock.patch("imgaug.random.RNG.copy")
@mock.patch("imgaug.random.RNG.is_global_rng")
def test_copy_unless_global_rng__is_not_global__mocked(self, mock_is_global,
mock_copy):
rng = iarandom.RNG(0)
mock_is_global.return_value = False
mock_copy.return_value = "foo"
result = rng.copy_unless_global_rng()
        assert result == "foo"
mock_is_global.assert_called_once_with()
mock_copy.assert_called_once_with()
def test_duplicate(self):
rng = iarandom.RNG(0)
rngs = rng.duplicate(1)
assert rngs == [rng]
def test_duplicate_two_entries(self):
rng = iarandom.RNG(0)
rngs = rng.duplicate(2)
assert rngs == [rng, rng]
@mock.patch("imgaug.random.create_fully_random_generator")
def test_create_fully_random_mocked(self, mock_create):
gen = iarandom.convert_seed_to_generator(0)
mock_create.return_value = gen
rng = iarandom.RNG.create_fully_random()
mock_create.assert_called_once_with()
assert rng.generator is gen
@mock.patch("imgaug.random.derive_generators_")
def test_create_pseudo_random__mocked(self, mock_get):
rng_glob = iarandom.get_global_rng()
rng = iarandom.RNG(0)
mock_get.return_value = [rng.generator]
result = iarandom.RNG.create_pseudo_random_()
assert result.generator is rng.generator
mock_get.assert_called_once_with(rng_glob.generator, 1)
@mock.patch("imgaug.random.polyfill_integers")
def test_integers_mocked(self, mock_func):
mock_func.return_value = "foo"
rng = iarandom.RNG(0)
result = rng.integers(low=0, high=1, size=(1,), dtype="int64",
endpoint=True)
assert result == "foo"
mock_func.assert_called_once_with(
rng.generator, low=0, high=1, size=(1,), dtype="int64",
endpoint=True)
@mock.patch("imgaug.random.polyfill_random")
def test_random_mocked(self, mock_func):
mock_func.return_value = "foo"
rng = iarandom.RNG(0)
out = np.zeros((1,), dtype="float64")
result = rng.random(size=(1,), dtype="float64", out=out)
assert result == "foo"
mock_func.assert_called_once_with(
rng.generator, size=(1,), dtype="float64", out=out)
# TODO below test for generator methods are all just mock-based, add
# non-mocked versions
def test_choice_mocked(self):
self._test_sampling_func("choice", a=[1, 2, 3], size=(1,),
replace=False, p=[0.1, 0.2, 0.7])
def test_bytes_mocked(self):
self._test_sampling_func("bytes", length=[10])
def test_shuffle_mocked(self):
mock_gen = mock.MagicMock()
rng = iarandom.RNG(0)
rng.generator = mock_gen
rng.shuffle([1, 2, 3])
mock_gen.shuffle.assert_called_once_with([1, 2, 3])
def test_permutation_mocked(self):
mock_gen = mock.MagicMock()
rng = iarandom.RNG(0)
rng.generator = mock_gen
mock_gen.permutation.return_value = "foo"
result = rng.permutation([1, 2, 3])
assert result == "foo"
mock_gen.permutation.assert_called_once_with([1, 2, 3])
def test_beta_mocked(self):
self._test_sampling_func("beta", a=1.0, b=2.0, size=(1,))
def test_binomial_mocked(self):
self._test_sampling_func("binomial", n=10, p=0.1, size=(1,))
def test_chisquare_mocked(self):
self._test_sampling_func("chisquare", df=2, size=(1,))
def test_dirichlet_mocked(self):
self._test_sampling_func("dirichlet", alpha=0.1, size=(1,))
def test_exponential_mocked(self):
self._test_sampling_func("exponential", scale=1.1, size=(1,))
def test_f_mocked(self):
self._test_sampling_func("f", dfnum=1, dfden=2, size=(1,))
def test_gamma_mocked(self):
self._test_sampling_func("gamma", shape=1, scale=1.2, size=(1,))
def test_geometric_mocked(self):
self._test_sampling_func("geometric", p=0.5, size=(1,))
def test_gumbel_mocked(self):
self._test_sampling_func("gumbel", loc=0.1, scale=1.1, size=(1,))
def test_hypergeometric_mocked(self):
self._test_sampling_func("hypergeometric", ngood=2, nbad=4, nsample=6,
size=(1,))
def test_laplace_mocked(self):
self._test_sampling_func("laplace", loc=0.5, scale=1.5, size=(1,))
def test_logistic_mocked(self):
self._test_sampling_func("logistic", loc=0.5, scale=1.5, size=(1,))
def test_lognormal_mocked(self):
self._test_sampling_func("lognormal", mean=0.5, sigma=1.5, size=(1,))
def test_logseries_mocked(self):
self._test_sampling_func("logseries", p=0.5, size=(1,))
def test_multinomial_mocked(self):
self._test_sampling_func("multinomial", n=5, pvals=0.5, size=(1,))
def test_multivariate_normal_mocked(self):
self._test_sampling_func("multivariate_normal", mean=0.5, cov=1.0,
size=(1,), check_valid="foo", tol=1e-2)
def test_negative_binomial_mocked(self):
self._test_sampling_func("negative_binomial", n=10, p=0.5, size=(1,))
def test_noncentral_chisquare_mocked(self):
self._test_sampling_func("noncentral_chisquare", df=0.5, nonc=1.0,
size=(1,))
def test_noncentral_f_mocked(self):
self._test_sampling_func("noncentral_f", dfnum=0.5, dfden=1.5,
nonc=2.0, size=(1,))
def test_normal_mocked(self):
self._test_sampling_func("normal", loc=0.5, scale=1.0, size=(1,))
def test_pareto_mocked(self):
self._test_sampling_func("pareto", a=0.5, size=(1,))
def test_poisson_mocked(self):
self._test_sampling_func("poisson", lam=1.5, size=(1,))
def test_power_mocked(self):
self._test_sampling_func("power", a=0.5, size=(1,))
def test_rayleigh_mocked(self):
self._test_sampling_func("rayleigh", scale=1.5, size=(1,))
def test_standard_cauchy_mocked(self):
self._test_sampling_func("standard_cauchy", size=(1,))
def test_standard_exponential_np117_mocked(self):
fname = "standard_exponential"
arr = np.zeros((1,), dtype="float16")
args = []
kwargs = {"size": (1,), "dtype": "float16", "method": "foo",
"out": arr}
mock_gen = mock.MagicMock()
getattr(mock_gen, fname).return_value = "foo"
rng = iarandom.RNG(0)
rng.generator = mock_gen
rng._is_new_rng_style = True
result = getattr(rng, fname)(*args, **kwargs)
assert result == "foo"
getattr(mock_gen, fname).assert_called_once_with(*args, **kwargs)
def test_standard_exponential_np116_mocked(self):
fname = "standard_exponential"
arr_out = np.zeros((1,), dtype="float16")
arr_result = np.ones((1,), dtype="float16")
def _side_effect(x):
return arr_result
args = []
kwargs = {"size": (1,), "dtype": "float16", "method": "foo",
"out": arr_out}
kwargs_subcall = {"size": (1,)}
mock_gen = mock.MagicMock()
mock_gen.astype.side_effect = _side_effect
getattr(mock_gen, fname).return_value = mock_gen
rng = iarandom.RNG(0)
rng.generator = mock_gen
rng._is_new_rng_style = False
result = getattr(rng, fname)(*args, **kwargs)
getattr(mock_gen, fname).assert_called_once_with(*args,
**kwargs_subcall)
mock_gen.astype.assert_called_once_with("float16")
assert np.allclose(result, arr_result)
assert np.allclose(arr_out, arr_result)
def test_standard_gamma_np117_mocked(self):
fname = "standard_gamma"
arr = np.zeros((1,), dtype="float16")
args = []
kwargs = {"shape": 1.0, "size": (1,), "dtype": "float16", "out": arr}
mock_gen = mock.MagicMock()
getattr(mock_gen, fname).return_value = "foo"
rng = iarandom.RNG(0)
rng.generator = mock_gen
rng._is_new_rng_style = True
result = getattr(rng, fname)(*args, **kwargs)
assert result == "foo"
getattr(mock_gen, fname).assert_called_once_with(*args, **kwargs)
def test_standard_gamma_np116_mocked(self):
fname = "standard_gamma"
arr_out = np.zeros((1,), dtype="float16")
arr_result = np.ones((1,), dtype="float16")
def _side_effect(x):
return arr_result
args = []
kwargs = {"shape": 1.0, "size": (1,), "dtype": "float16",
"out": arr_out}
kwargs_subcall = {"shape": 1.0, "size": (1,)}
mock_gen = mock.MagicMock()
mock_gen.astype.side_effect = _side_effect
getattr(mock_gen, fname).return_value = mock_gen
rng = iarandom.RNG(0)
rng.generator = mock_gen
rng._is_new_rng_style = False
result = getattr(rng, fname)(*args, **kwargs)
getattr(mock_gen, fname).assert_called_once_with(*args,
**kwargs_subcall)
mock_gen.astype.assert_called_once_with("float16")
assert np.allclose(result, arr_result)
assert np.allclose(arr_out, arr_result)
def test_standard_normal_np117_mocked(self):
fname = "standard_normal"
arr = np.zeros((1,), dtype="float16")
args = []
kwargs = {"size": (1,), "dtype": "float16", "out": arr}
mock_gen = mock.MagicMock()
getattr(mock_gen, fname).return_value = "foo"
rng = iarandom.RNG(0)
rng.generator = mock_gen
rng._is_new_rng_style = True
result = getattr(rng, fname)(*args, **kwargs)
assert result == "foo"
getattr(mock_gen, fname).assert_called_once_with(*args, **kwargs)
def test_standard_normal_np116_mocked(self):
fname = "standard_normal"
arr_out = np.zeros((1,), dtype="float16")
arr_result = np.ones((1,), dtype="float16")
def _side_effect(x):
return arr_result
args = []
kwargs = {"size": (1,), "dtype": "float16", "out": arr_out}
kwargs_subcall = {"size": (1,)}
mock_gen = mock.MagicMock()
mock_gen.astype.side_effect = _side_effect
getattr(mock_gen, fname).return_value = mock_gen
rng = iarandom.RNG(0)
rng.generator = mock_gen
rng._is_new_rng_style = False
result = getattr(rng, fname)(*args, **kwargs)
getattr(mock_gen, fname).assert_called_once_with(*args,
**kwargs_subcall)
mock_gen.astype.assert_called_once_with("float16")
assert np.allclose(result, arr_result)
assert np.allclose(arr_out, arr_result)
def test_standard_t_mocked(self):
self._test_sampling_func("standard_t", df=1.5, size=(1,))
def test_triangular_mocked(self):
self._test_sampling_func("triangular", left=1.0, mode=1.5, right=2.0,
size=(1,))
def test_uniform_mocked(self):
self._test_sampling_func("uniform", low=0.5, high=1.5, size=(1,))
def test_vonmises_mocked(self):
self._test_sampling_func("vonmises", mu=1.0, kappa=1.5, size=(1,))
def test_wald_mocked(self):
self._test_sampling_func("wald", mean=0.5, scale=1.0, size=(1,))
def test_weibull_mocked(self):
self._test_sampling_func("weibull", a=1.0, size=(1,))
def test_zipf_mocked(self):
self._test_sampling_func("zipf", a=1.0, size=(1,))
@classmethod
def _test_sampling_func(cls, fname, *args, **kwargs):
mock_gen = mock.MagicMock()
getattr(mock_gen, fname).return_value = "foo"
rng = iarandom.RNG(0)
rng.generator = mock_gen
result = getattr(rng, fname)(*args, **kwargs)
assert result == "foo"
getattr(mock_gen, fname).assert_called_once_with(*args, **kwargs)
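    # Hedged non-mocked sketch (illustrative only, not an exhaustive test):
    # the same seed should reproduce the same samples, e.g.
    #   s1 = iarandom.RNG(0).normal(loc=0.5, scale=1.0, size=(3,))
    #   s2 = iarandom.RNG(0).normal(loc=0.5, scale=1.0, size=(3,))
    #   assert np.allclose(s1, s2)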
#
# outdated methods from RandomState
#
def test_rand_mocked(self):
self._test_sampling_func_alias("rand", "random", 1, 2, 3)
def test_randint_mocked(self):
self._test_sampling_func_alias("randint", "integers", 0, 100)
def randn(self):
self._test_sampling_func_alias("randn", "standard_normal", 1, 2, 3)
def random_integers(self):
self._test_sampling_func_alias("random_integers", "integers", 1, 2)
def random_sample(self):
self._test_sampling_func_alias("random_sample", "uniform", (1, 2, 3))
def tomaxint(self):
self._test_sampling_func_alias("tomaxint", "integers", (1, 2, 3))
def test_rand(self):
result = iarandom.RNG(0).rand(10, 20, 3)
assert result.dtype.name == "float32"
assert result.shape == (10, 20, 3)
assert np.all(result >= 0.0)
assert np.all(result <= 1.0)
assert np.any(result > 0.0)
assert np.any(result < 1.0)
def test_randint(self):
result = iarandom.RNG(0).randint(10, 100, size=(10, 20, 3))
assert result.dtype.name == "int32"
assert result.shape == (10, 20, 3)
assert np.all(result >= 10)
assert np.all(result <= 99)
assert np.any(result > 10)
assert np.any(result < 99)
def test_randn(self):
result = iarandom.RNG(0).randn(10, 50, 3)
assert result.dtype.name == "float32"
assert result.shape == (10, 50, 3)
assert np.any(result > 0.5)
assert np.any(result < -0.5)
assert np.average(np.logical_or(result < 2.0, result > -2.0)) > 0.5
def test_random_integers(self):
result = iarandom.RNG(0).random_integers(10, 100, size=(10, 20, 3))
assert result.dtype.name == "int32"
assert result.shape == (10, 20, 3)
        assert np.all(result >= 10)
#!/usr/bin/env python3
from pathlib import Path
import sys
import cv2
import depthai as dai
import numpy as np
# Get argument first
mobilenet_path = str((Path(__file__).parent / Path('models/mobilenet.blob')).resolve().absolute())
if len(sys.argv) > 1:
mobilenet_path = sys.argv[1]
pipeline = dai.Pipeline()
cam = pipeline.createColorCamera()
cam.setBoardSocket(dai.CameraBoardSocket.RGB)
cam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
videoEncoder = pipeline.createVideoEncoder()
videoEncoder.setDefaultProfilePreset(1920, 1080, 30, dai.VideoEncoderProperties.Profile.H265_MAIN)
cam.video.link(videoEncoder.input)
videoOut = pipeline.createXLinkOut()
videoOut.setStreamName('h265')
videoEncoder.bitstream.link(videoOut.input)
left = pipeline.createMonoCamera()
left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
left.setBoardSocket(dai.CameraBoardSocket.LEFT)
right = pipeline.createMonoCamera()
right.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
right.setBoardSocket(dai.CameraBoardSocket.RIGHT)
depth = pipeline.createStereoDepth()
depth.setConfidenceThreshold(200)
# Note: the rectified streams are horizontally mirrored by default
depth.setOutputRectified(True)
depth.setRectifyEdgeFillColor(0) # Black, to better see the cutout
left.out.link(depth.left)
right.out.link(depth.right)
detection_nn = pipeline.createNeuralNetwork()
detection_nn.setBlobPath(mobilenet_path)
xout_depth = pipeline.createXLinkOut()
xout_depth.setStreamName("depth")
depth.disparity.link(xout_depth.input)
xout_right = pipeline.createXLinkOut()
xout_right.setStreamName("rect_right")
depth.rectifiedRight.link(xout_right.input)
manip = pipeline.createImageManip()
manip.initialConfig.setResize(300, 300)
# The NN model expects BGR input. By default ImageManip output type would be same as input (gray in this case)
manip.initialConfig.setFrameType(dai.RawImgFrame.Type.BGR888p)
depth.rectifiedRight.link(manip.inputImage)
manip.out.link(detection_nn.input)
xout_manip = pipeline.createXLinkOut()
xout_manip.setStreamName("manip")
manip.out.link(xout_manip.input)
xout_nn = pipeline.createXLinkOut()
xout_nn.setStreamName("nn")
detection_nn.out.link(xout_nn.input)
# MobilenetSSD label texts
texts = ["background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow",
"diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]
# Pipeline defined, now the device is connected to
with dai.Device(pipeline) as device:
# Start pipeline
device.startPipeline()
q_right = device.getOutputQueue(name="rect_right", maxSize=8, blocking=False)
q_manip = device.getOutputQueue(name="manip", maxSize=8, blocking=False)
q_depth = device.getOutputQueue(name="depth", maxSize=8, blocking=False)
q_nn = device.getOutputQueue(name="nn", maxSize=8, blocking=False)
q_rgb_enc = device.getOutputQueue(name="h265", maxSize=30, blocking=True)
frame_right = None
frame_manip = None
frame_depth = None
bboxes = []
labels = []
def frame_norm(frame, bbox):
norm_vals = np.full(len(bbox), frame.shape[0])
norm_vals[::2] = frame.shape[1]
return (np.clip(np.array(bbox), 0, 1) * norm_vals).astype(int)
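    # Illustrative note (values are assumed): for a 300x300 manip frame,
    # frame_norm maps a normalized bbox to pixel coordinates; even indices are
    # scaled by the frame width and odd indices by the frame height, e.g.
    #   frame_norm(frame_manip, [0.1, 0.2, 0.5, 0.6])  # -> [ 30  60 150 180]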
videoFile = open('video.h265','wb')
while True:
in_right = q_right.tryGet()
in_manip = q_manip.tryGet()
in_nn = q_nn.tryGet()
in_depth = q_depth.tryGet()
while q_rgb_enc.has():
q_rgb_enc.get().getData().tofile(videoFile)
if in_right is not None:
shape = (in_right.getHeight(), in_right.getWidth())
frame_right = in_right.getData().reshape(shape).astype(np.uint8)
frame_right = np.ascontiguousarray(frame_right)
if in_manip is not None:
shape = (3, in_manip.getHeight(), in_manip.getWidth())
frame_manip = in_manip.getData().reshape(shape).transpose(1, 2, 0).astype(np.uint8)
frame_manip = np.ascontiguousarray(frame_manip)
if in_nn is not None:
bboxes = np.array(in_nn.getFirstLayerFp16())
bboxes = bboxes.reshape((bboxes.size // 7, 7))
bboxes = bboxes[bboxes[:, 2] > 0.5]
# Cut bboxes and labels
labels = bboxes[:, 1].astype(int)
bboxes = bboxes[:, 3:7]
if in_depth is not None:
frame_depth = in_depth.getData().reshape((in_depth.getHeight(), in_depth.getWidth())).astype(np.uint8)
            frame_depth = np.ascontiguousarray(frame_depth)
from __future__ import print_function, division
import sys,os
qspin_path = os.path.join(os.getcwd(),"../")
sys.path.insert(0,qspin_path)
from quspin.basis import spin_basis_general
import numpy as np
L = 13
for k in range(L):
for q in range(L):
print("testing k={} -> k+q={}".format(k,(k+q)%L))
# use standard static list for this.
# use generators to generate coupling list
op_list = [["z",[i],np.exp(-2j*np.pi*q*i/L)] for i in range(L)]
#coupling=[[np.exp(-2j*np.pi*q*i/L),i] for i in range(L)]
#op_list = [["z",coupling]]
t = (np.arange(L)+1)%L
b = spin_basis_general(L)
b1 = spin_basis_general(L,kblock=(t,k))
b2 = spin_basis_general(L,kblock=(t,k+q))
# print(b1)
# print(b2)
P1 = b1.get_proj(np.complex128)
P2 = b2.get_proj(np.complex128)
v_in = np.random.normal(0,1,size=b1.Ns) + 1j*np.random.normal(0,1,size=b1.Ns)
v_in /= np.linalg.norm(v_in)
v_in_full = P1.dot(v_in)
v_out_full = b.inplace_Op(v_in_full,op_list,np.complex128)
v_out_proj = P2.H.dot(v_out_full)
# swap b1 with op_list in arguments.
v_out = b2.Op_shift_sector(b1,op_list,v_in)
np.testing.assert_allclose(v_out,v_out_proj,rtol=0, atol=1e-13)
for q1 in [0,1]:
for q2 in [0,1]:
i = 1
print("testing q1={} -> q2={}".format(q1,q2))
op_list = [["z",[i],1],["z",[L-i-1],(-1)**q2]]
p = np.arange(L)[::-1]
# z = -(np.arange(L)+1)
b = spin_basis_general(L)
b1 = spin_basis_general(L,block=(p,q1))
b2 = spin_basis_general(L,block=(p,q1+q2))
# print(b1)
# print(b2)
P1 = b1.get_proj(np.complex128)
P2 = b2.get_proj(np.complex128)
v_in = np.random.normal(0,1,size=b1.Ns) + 1j*np.random.normal(0,1,size=b1.Ns)
v_in /= np.linalg.norm(v_in)
v_in_full = P1.dot(v_in)
v_out_full = b.inplace_Op(v_in_full,op_list,np.complex128)
v_out_proj = P2.H.dot(v_out_full)
v_out = b2.Op_shift_sector(b1,op_list,v_in)
np.testing.assert_allclose(v_out,v_out_proj,rtol=0, atol=1e-13)
for Nup in range(0,L):
print("testig Nup={} -> Nup={}".format(Nup,Nup+1))
opp_list = [["+",[i],1.0] for i in range(L)]
b = spin_basis_general(L)
b1 = spin_basis_general(L,Nup=Nup)
b2 = spin_basis_general(L,Nup=Nup+1)
P1 = b1.get_proj(np.complex128)
P2 = b2.get_proj(np.complex128)
    v_in = np.random.normal(0,1,size=b1.Ns) + 1j*np.random.normal(0,1,size=b1.Ns)
####Please do not remove lines below####
from lmfit import Parameters
import numpy as np
import sys
import os
import math
sys.path.append(os.path.abspath('.'))
sys.path.append(os.path.abspath('./Functions'))
sys.path.append(os.path.abspath('./Fortran_routines/'))
from functools import lru_cache
####Please do not remove lines above####
####Import your modules below if needed####
# from xr_ref import parratt_numba
from numba import njit, prange
@njit(parallel=True,cache=True)
def parratt_numba(q,lam,d,rho,beta):
ref=np.ones_like(q)
refc=np.ones_like(q)*complex(1.0,0.0)
f1=16.0*np.pi*2.818e-5
f2=-32.0*np.pi**2/lam**2
Nl=len(d)
for j in range(len(q)):
r=complex(0.0,0.0)
for it in range(1,Nl):
i=Nl-it
qc1=f1*(rho[i-1]-rho[0])
qc2=f1*(rho[i]-rho[0])
k1=np.sqrt(complex(q[j]**2-qc1,f2*beta[i-1]))
k2=np.sqrt(complex(q[j]**2-qc2,f2*beta[i]))
X=(k1-k2)/(k1+k2)
fact1=complex(np.cos(k2.real*d[i]),np.sin(k2.real*d[i]))
fact2=np.exp(-k2.imag*d[i])
fact=fact1*fact2
r=(X+r*fact)/(1.0+X*r*fact)
ref[j]=np.abs(r)**2
refc[j]=r
return ref,r
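# Hedged usage sketch (the values below are assumptions, not part of the model):
# reflectivity of a bare ambient/subphase interface, with layer 0 as the top
# phase and the last layer as the bottom phase, evaluated near 10 keV.
#   q    = np.linspace(0.01, 0.5, 200)        # wave-vector transfer (1/Angs)
#   d    = np.array([0.0, 1.0])               # top/bottom layer thicknesses
#   rho  = np.array([0.0, 0.334])             # electron densities (el/Angs^3)
#   beta = np.array([0.0, 0.0])               # absorption terms
#   ref, _ = parratt_numba(q, 1.24, d, rho, beta)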
class XLayers_Triphasic: #Please put the class name same as the function name
def __init__(self,x=0.1,E=10.0,mpar={'Phase1':{'Layers':['top','bottom'],'d':[0.0,1.0],'rho':[0.0,0.333],'mu':[0.0,0.0],'sig':[0.0,3.0]},
'Phase2':{'Layers':['top','bottom'],'d':[0.0,1.0],'rho':[0.0,0.333],'mu':[0.0,0.0],'sig':[0.0,3.0]},
'Phase3':{'Layers':['top','bottom'],'d':[0.0,1.0],'rho':[0.0,0.333],'mu':[0.0,0.0],'sig':[0.0,3.0]}},
dz=0.5, rrf=True, fix_sig=False, qoff=0.0, yscale=1,cov1=0.33, cov2=0.33, bkg=0.0, coherrent=False, aveed=True):
"""
Calculates X-ray reflectivity from a system of multiple layers using Parratt formalism
x : array of wave-vector transfer along z-direction
        E : Energy of x-rays in inverse units of x
        dz : The thickness (Angstrom) of each slice used when applying the Parratt formalism
        rrf : True for Fresnel-normalized reflectivity and False for plain reflectivity
        qoff : q-offset to correct the zero q of the instrument
        cov1 : The coverage of Phase1; the value should be between 0 and 1
        cov2 : The coverage of Phase2; the value should be between 0 and 1
        yscale : a scale factor for R or R/Rf
        bkg : Incoherent background
        coherrent : True or False for coherent or incoherent addition of reflectivities from different phases
        fix_sig : True or False for constraining or not constraining all the roughness parameters to the roughness of the bare interface
mpar : Dictionary of Phases where,
Layers: Layer description,
d: thickness of each layer in Angs,
rho:Electron density of each layer in el/Angs^3,
mu: Absorption length of each layer in 1/cm,
sig: roughness of interface separating each layer in Angs.
        The upper and lower thicknesses should always be kept fixed. The roughness of the topmost layer should always be kept 0.
"""
if type(x)==list:
self.x=np.array(x)
else:
self.x=x
self.E=E
self.__mpar__=mpar
self.dz=dz
self.rrf=rrf
self.fix_sig=fix_sig
self.qoff=qoff
self.bkg=bkg
self.yscale=yscale
self.coherrent=coherrent
self.aveed=aveed
self.cov1=cov1
self.cov2=cov2
self.choices={'rrf':[True,False],'fix_sig': [True,False],'coherrent':[True,False],'aveed':[True,False]}
self.__d__={}
self.__rho__={}
self.__mu__={}
self.__sig__={}
self.__fit__=False
self.__mkeys__ = list(self.__mpar__.keys())
self.output_params = {'scaler_parameters': {}}
self.init_params()
def init_params(self):
"""
Define all the fitting parameters like
self.param.add('sig',value=0,vary=0)
"""
self.params=Parameters()
self.params.add('qoff', self.qoff, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
self.params.add('yscale', self.yscale, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
self.params.add('cov1', self.cov1, vary=0, min=0, max=1, expr=None, brute_step=0.1)
self.params.add('cov2', self.cov2, vary=0, min=0, max=1, expr=None, brute_step=0.1)
self.params.add('bkg', self.bkg, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)
for mkey in self.__mpar__.keys():
for key in self.__mpar__[mkey].keys():
if key!='Layers':
for i in range(len(self.__mpar__[mkey][key])):
self.params.add('__%s_%s_%03d' % (mkey,key, i), value=self.__mpar__[mkey][key][i], vary=0, min=0, max=np.inf, expr=None, brute_step=0.05)
@lru_cache(maxsize=10)
def calcProfile(self,d,rho,mu,sig,phase,dz,zmin=None,zmax=None):
"""
Calculates the electron and absorption density profiles
"""
d = np.array(d)
rho = np.array(rho)
mu = np.array(mu)
sig = np.array(sig)
if self.fix_sig:
for i in range(1,len(sig)):
sig[i]=sig[1]
n=len(d)
if zmin is None and zmax is None:
maxsig=max(np.abs(np.max(sig[1:])),3)
Nlayers=int((np.sum(d[:-1])+10*maxsig)/dz)
halfstep=(np.sum(d[:-1])+10*maxsig)/2/Nlayers
__z__=np.linspace(-5*maxsig+halfstep,np.sum(d[:-1])+5*maxsig-halfstep,Nlayers)
offset=0.0
else:
Nlayers=int((zmax-zmin)/dz)
__z__=np.linspace(zmin,zmax,Nlayers+1)
offset=np.sum(d[:-1])
__d__=np.diff(__z__)
__d__=np.append(__d__,[__d__[-1]])
__rho__=self.sldCalFun(tuple(d),tuple(rho),tuple(sig), tuple(__z__),offset=offset)
__mu__=self.sldCalFun(tuple(d),tuple(mu),tuple(sig), tuple(__z__),offset=offset)
return n,__z__,__d__,__rho__,__mu__
@lru_cache(maxsize=10)
def sldCalFun(self,d,y,sigma,x,offset=0.0):
wholesld=[]
for j in range(len(x)):
sld=0
for i in range(len(d)-1):
pos=np.sum(d[:i+1])
sld=sld+math.erf((x[j]-pos+offset)/sigma[i+1]/math.sqrt(2))*(y[i+1]-y[i])
wholesld.append(max((sld+y[0]+y[-1])/2,0))
return np.array(wholesld)
@lru_cache(maxsize=10)
def stepFun(self, zmin, zmax, d, rho, mu):
tdata = [[zmin, rho[0], mu[0]]]
z = np.cumsum(d)
for i, td in enumerate(d[:-1]):
tdata.append([z[i], rho[i], mu[i]])
tdata.append([z[i], rho[i + 1], mu[i + 1]])
tdata.append([zmax, rho[-1], mu[-1]])
tdata = np.array(tdata)
return tdata[:, 0], tdata[:, 1], tdata[:, 2]
@lru_cache(maxsize=10)
def py_parratt(self,x,lam,d,rho,mu):
        return parratt_numba(np.array(x), lam, np.array(d), np.array(rho), np.array(mu))
import os
import numpy as np
from pathlib import Path
import json
from PIL import Image
def normalize(v):
'''
This function returns the normalized vector v
'''
norm = np.linalg.norm(v)
if norm ==0:
return v
return v/norm
def find_peaks(a, reach=10):
'''
    This function returns the indices of all non-zero values in array a that are
    the maximum of a square of side 2*reach+1 centered at its location. The
    default square size is 21x21.
'''
rows, columns = np.shape(a)
# create a new array that is just a with a border of all 0s so we don't have to deal with edge cases
new_a = np.zeros([rows+(2*reach),columns+(2*reach)])
new_a[reach:-reach,reach:-reach] = a
peaks = []
# loop through each element in a
for r in range(reach,rows+reach):
for c in range(reach,columns+reach):
mid = new_a[r][c]
# first check if the value is nonzero
if mid!= 0:
# check if the value is the max value of the square
if mid == np.amax(new_a[r-reach:r+reach+1,c-reach:c+reach+1]):
peaks.append([r-reach,c-reach])
return peaks
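# Illustrative sketch (toy input, values assumed): find_peaks returns the
# [row, col] indices of non-zero entries that are maximal within their local
# square of side 2*reach+1.
#   a = np.zeros((30, 30))
#   a[5, 5], a[20, 10] = 0.9, 0.8    # two isolated responses
#   find_peaks(a, reach=10)          # -> [[5, 5], [20, 10]]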
def get_circle_kernel(size, color):
'''
This function defines a square kernel matrix of size (2*size+1) X (2*size+1)
    filled with a circle of radius `size` in the given [R,G,B] color, drawn on
    a black background
'''
# first check if size is a valid number
if size < 1:
print('size cannot be < 1')
return
# define the size of the square kernel matix
k = np.zeros((2*size+1,2*size+1,3))
for r in range(2*size+1):
for c in range(2*size+1):
if np.sqrt((r-size)**2+(c-size)**2) < size:
k[r][c] = color
return k
def shrink_kernel(k):
'''
This function shrinks the size of the kernel by 2 rows and 2 columns
    by averaging each pixel's RGB value with its neighboring pixels
'''
new_k = np.zeros((np.shape(k)[0]-2,np.shape(k)[1]-2,3))
for r in range(np.shape(new_k)[0]):
for c in range(np.shape(new_k)[1]):
red_square = k[r:r+3,c:c+3,0]
green_square = k[r:r+3,c:c+3,1]
blue_square = k[r:r+3,c:c+3,2]
new_k[r][c] = [np.average(red_square),np.average(green_square),np.average(blue_square)]
return new_k
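# Illustrative sketch (values assumed): a circle kernel of radius 5 is an
# 11x11x3 array; one shrink step averages 3x3 neighbourhoods and removes one
# pixel of border on each side.
#   k = get_circle_kernel(5, [255, 0, 0])    # red circle on black background
#   k.shape                                  # -> (11, 11, 3)
#   shrink_kernel(k).shape                   # -> (9, 9, 3)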
def detect_red_light(I,k,threshold = 0.95):
'''
This function takes a numpy array <I> and returns a list <bounding_boxes>.
The list <bounding_boxes> should have one element for each red light in the
image. Each element of <bounding_boxes> should itself be a list, containing
four integers that specify a bounding box: the row and column index of the
top left corner and the row and column index of the bottom right corner (in
that order). See the code below for an example.
Note that PIL loads images in RGB order, so:
I[:,:,0] is the red channel
I[:,:,1] is the green channel
I[:,:,2] is the blue channel
'''
'''
BEGIN YOUR CODE
'''
bounding_boxes = []
# define the height and width of the kernel
k_height = np.shape(k)[0]
k_width = np.shape(k)[1]
# normalize the kernel
k_red = normalize(k[...,0]).flatten()
k_green = normalize(k[...,1]).flatten()
k_blue = normalize(k[...,2]).flatten()
# create a matrix to store the scores
score = np.zeros([np.shape(I)[0]-k_height,np.shape(I)[1]-k_width])
# convolve the kernel with the image
# loop through the image
for r in range(np.shape(I)[0]-k_height):
for c in range(np.shape(I)[1]-k_width):
# select a region of the image with the same size as the kernel
d = I[r:r+k_height,c:c+k_width,:]
# normalize the selected region of the image
d_red = normalize(d[...,0]).flatten()
d_green = normalize(d[...,1]).flatten()
d_blue = normalize(d[...,2]).flatten()
red_score = np.inner(d_red,k_red)
green_score = np.inner(d_green,k_green)
blue_score = np.inner(d_blue,k_blue)
avg_score = (red_score+blue_score+green_score)/3
if avg_score > threshold:
score[r][c] = avg_score
peaks = find_peaks(score)
for peak in peaks:
bounding_boxes.append([peak[0],peak[1],peak[0]+k_height,peak[1]+k_width])
'''
END YOUR CODE
'''
for i in range(len(bounding_boxes)):
assert len(bounding_boxes[i]) == 4
return bounding_boxes
# set the current path
path = os.getcwd()
# set the path to the downloaded data:
data_path = r'C:\Users\amora\Documents\Caltech\EE 148\HW1\data\RedLights2011_Medium'
# set a path for saving predictions:
preds_path = path + '\\hw01_preds'
os.makedirs(preds_path,exist_ok=True) # create directory if needed
# get sorted list of files:
file_names = sorted(os.listdir(data_path))
# remove any non-JPEG files:
file_names = [f for f in file_names if '.jpg' in f]
# define the kernel
# read image using PIL:
I = Image.open(os.path.join(data_path,'RL-011.jpg'))
I = np.array(I)
k = I[68:132,349:376,:]
# read image using PIL:
I = Image.open(os.path.join(data_path,'RL-032.jpg'))
I = np.array(I)
# -*- coding: utf-8 -*-
# pylint: disable=E1101
import string
from collections import OrderedDict
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
import pytest
from kartothek.core.dataset import DatasetMetadata
from kartothek.core.index import ExplicitSecondaryIndex
from kartothek.core.uuid import gen_uuid
from kartothek.io_components.metapartition import MetaPartition
from kartothek.serialization import DataFrameSerializer
def test_file_structure_dataset_v4(store_factory, bound_store_dataframes):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": | np.arange(10, 20) | numpy.arange |
# -*- coding: utf-8 -*-
# Copyright 2017, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
# pylint: disable=invalid-name,anomalous-backslash-in-string
"""
A collection of useful quantum information functions.
Currently this file is very sparse. More functions will be added
over time.
"""
import math
import warnings
import numpy as np
import scipy.linalg as la
from scipy.stats import unitary_group
from qiskit import QISKitError
from qiskit.quantum_info import pauli_group
from qiskit.quantum_info import state_fidelity as new_state_fidelity
###############################################################
# circuit manipulation.
###############################################################
# Define methods for making QFT circuits
def qft(circ, q, n):
"""n-qubit QFT on q in circ."""
for j in range(n):
for k in range(j):
circ.cu1(math.pi / float(2**(j - k)), q[j], q[k])
circ.h(q[j])
###############################################################
# State manipulation.
###############################################################
def partial_trace(state, trace_systems, dimensions=None, reverse=True):
"""
Partial trace over subsystems of multi-partite matrix.
Note that subsystems are ordered as rho012 = rho0(x)rho1(x)rho2.
Args:
state (matrix_like): a matrix NxN
trace_systems (list(int)): a list of subsystems (starting from 0) to
trace over.
dimensions (list(int)): a list of the dimensions of the subsystems.
If this is not set it will assume all
subsystems are qubits.
reverse (bool): ordering of systems in operator.
If True system-0 is the right most system in tensor product.
If False system-0 is the left most system in tensor product.
Returns:
matrix_like: A density matrix with the appropriate subsystems traced
over.
Raises:
Exception: if input is not a multi-qubit state.
"""
state = np.array(state) # convert op to density matrix
if dimensions is None: # compute dims if not specified
num_qubits = int(np.log2(len(state)))
dimensions = [2 for _ in range(num_qubits)]
if len(state) != 2 ** num_qubits:
raise Exception("Input is not a multi-qubit state, "
"specify input state dims")
else:
dimensions = list(dimensions)
if isinstance(trace_systems, int):
trace_systems = [trace_systems]
else: # reverse sort trace sys
trace_systems = sorted(trace_systems, reverse=True)
# trace out subsystems
if state.ndim == 1:
# optimized partial trace for input state vector
return __partial_trace_vec(state, trace_systems, dimensions, reverse)
# standard partial trace for input density matrix
return __partial_trace_mat(state, trace_systems, dimensions, reverse)
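# Hedged example (illustrative only): tracing one qubit out of the two-qubit
# Bell state (|00> + |11>)/sqrt(2) leaves the maximally mixed state I/2.
#   bell = np.array([1, 0, 0, 1]) / np.sqrt(2)
#   partial_trace(bell, [1])    # -> [[0.5, 0.], [0., 0.5]]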
def __partial_trace_vec(vec, trace_systems, dimensions, reverse=True):
"""
Partial trace over subsystems of multi-partite vector.
Args:
vec (vector_like): complex vector N
trace_systems (list(int)): a list of subsystems (starting from 0) to
trace over.
dimensions (list(int)): a list of the dimensions of the subsystems.
If this is not set it will assume all
subsystems are qubits.
reverse (bool): ordering of systems in operator.
If True system-0 is the right most system in tensor product.
If False system-0 is the left most system in tensor product.
Returns:
ndarray: A density matrix with the appropriate subsystems traced over.
"""
# trace sys positions
if reverse:
dimensions = dimensions[::-1]
trace_systems = len(dimensions) - 1 - np.array(trace_systems)
rho = vec.reshape(dimensions)
rho = np.tensordot(rho, rho.conj(), axes=(trace_systems, trace_systems))
d = int(np.sqrt(np.product(rho.shape)))
return rho.reshape(d, d)
def __partial_trace_mat(mat, trace_systems, dimensions, reverse=True):
"""
Partial trace over subsystems of multi-partite matrix.
Note that subsystems are ordered as rho012 = rho0(x)rho1(x)rho2.
Args:
mat (matrix_like): a matrix NxN.
trace_systems (list(int)): a list of subsystems (starting from 0) to
trace over.
dimensions (list(int)): a list of the dimensions of the subsystems.
If this is not set it will assume all
subsystems are qubits.
reverse (bool): ordering of systems in operator.
If True system-0 is the right most system in tensor product.
If False system-0 is the left most system in tensor product.
Returns:
ndarray: A density matrix with the appropriate subsystems traced over.
"""
trace_systems = sorted(trace_systems, reverse=True)
for j in trace_systems:
# Partition subsystem dimensions
dimension_trace = int(dimensions[j]) # traced out system
if reverse:
left_dimensions = dimensions[j + 1:]
right_dimensions = dimensions[:j]
dimensions = right_dimensions + left_dimensions
else:
left_dimensions = dimensions[:j]
right_dimensions = dimensions[j + 1:]
dimensions = left_dimensions + right_dimensions
# Contract remaining dimensions
dimension_left = int(np.prod(left_dimensions))
dimension_right = int(np.prod(right_dimensions))
# Reshape input array into tri-partite system with system to be
# traced as the middle index
mat = mat.reshape([dimension_left, dimension_trace, dimension_right,
dimension_left, dimension_trace, dimension_right])
# trace out the middle system and reshape back to a matrix
mat = mat.trace(axis1=1, axis2=4).reshape(
dimension_left * dimension_right,
dimension_left * dimension_right)
return mat
def vectorize(density_matrix, method='col'):
"""Flatten an operator to a vector in a specified basis.
Args:
density_matrix (ndarray): a density matrix.
method (str): the method of vectorization. Allowed values are
- 'col' (default) flattens to column-major vector.
- 'row' flattens to row-major vector.
- 'pauli'flattens in the n-qubit Pauli basis.
- 'pauli-weights': flattens in the n-qubit Pauli basis ordered by
weight.
Returns:
ndarray: the resulting vector.
Raises:
Exception: if input state is not a n-qubit state
"""
density_matrix = np.array(density_matrix)
if method == 'col':
return density_matrix.flatten(order='F')
elif method == 'row':
return density_matrix.flatten(order='C')
elif method in ['pauli', 'pauli_weights']:
num = int(np.log2(len(density_matrix))) # number of qubits
if len(density_matrix) != 2**num:
raise Exception('Input state must be n-qubit state')
if method == 'pauli_weights':
pgroup = pauli_group(num, case=0)
else:
pgroup = pauli_group(num, case=1)
vals = [np.trace(np.dot(p.to_matrix(), density_matrix))
for p in pgroup]
return np.array(vals)
return None
def devectorize(vectorized_mat, method='col'):
"""Devectorize a vectorized square matrix.
Args:
vectorized_mat (ndarray): a vectorized density matrix.
method (str): the method of devectorization. Allowed values are
- 'col' (default): flattens to column-major vector.
- 'row': flattens to row-major vector.
- 'pauli': flattens in the n-qubit Pauli basis.
- 'pauli-weights': flattens in the n-qubit Pauli basis ordered by
weight.
Returns:
ndarray: the resulting matrix.
Raises:
Exception: if input state is not a n-qubit state
"""
vectorized_mat = np.array(vectorized_mat)
dimension = int(np.sqrt(vectorized_mat.size))
if len(vectorized_mat) != dimension * dimension:
raise Exception('Input is not a vectorized square matrix')
if method == 'col':
return vectorized_mat.reshape(dimension, dimension, order='F')
elif method == 'row':
return vectorized_mat.reshape(dimension, dimension, order='C')
elif method in ['pauli', 'pauli_weights']:
num_qubits = int(np.log2(dimension)) # number of qubits
if dimension != 2 ** num_qubits:
raise Exception('Input state must be n-qubit state')
if method == 'pauli_weights':
pgroup = pauli_group(num_qubits, case=0)
else:
pgroup = pauli_group(num_qubits, case=1)
pbasis = np.array([p.to_matrix() for p in pgroup]) / 2 ** num_qubits
return np.tensordot(vectorized_mat, pbasis, axes=1)
return None
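# Illustrative round-trip check (added for clarity): vectorizing in the Pauli
# basis and devectorizing again should reproduce the original density matrix.
def _demo_vectorize_roundtrip():
    rho = np.array([[0.5, 0.5], [0.5, 0.5]])            # |+><+| for a single qubit
    vec = vectorize(rho, method='pauli')                 # Pauli-basis coefficients
    back = devectorize(vec, method='pauli')
    return np.allclose(back, rho)                        # expected True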
def choi_to_rauli(choi, order=1):
"""
Convert a Choi-matrix to a Pauli-basis superoperator.
Note that this function assumes that the Choi-matrix
is defined in the standard column-stacking convention
and is normalized to have trace 1. For a channel E this
is defined as: choi = (I \\otimes E)(bell_state).
The resulting 'rauli' R acts on input states as
|rho_out>_p = R.|rho_in>_p
where |rho> = vectorize(rho, method='pauli') for order=1
and |rho> = vectorize(rho, method='pauli_weights') for order=0.
Args:
choi (matrix): the input Choi-matrix.
order (int): ordering of the Pauli group vector.
order=1 (default) is standard lexicographic ordering.
Eg: [II, IX, IY, IZ, XI, XX, XY,...]
order=0 is ordered by weights.
Eg: [II, IX, IY, IZ, XI, YI, ZI, XX, XY, ...]
Returns:
np.array: A superoperator in the Pauli basis.
"""
# get the number of qubits
num_qubits = int(np.log2(np.sqrt(len(choi))))
pgp = pauli_group(num_qubits, case=order)
rauli = []
for i in pgp:
for j in pgp:
pauliop = np.kron(j.to_matrix().T, i.to_matrix())
rauli += [np.trace(np.dot(choi, pauliop))]
return np.array(rauli).reshape(4 ** num_qubits, 4 ** num_qubits)
def chop(array, epsilon=1e-10):
"""
Truncate small values of a complex array.
Args:
array (array_like): array in which to truncate small values.
epsilon (float): threshold.
Returns:
np.array: A new operator with small values set to zero.
"""
ret = np.array(array)
if np.isrealobj(ret):
ret[abs(ret) < epsilon] = 0.0
else:
ret.real[abs(ret.real) < epsilon] = 0.0
ret.imag[abs(ret.imag) < epsilon] = 0.0
return ret
def outer(vector1, vector2=None):
"""
Construct the outer product of two vectors.
The second vector argument is optional, if absent the projector
of the first vector will be returned.
Args:
vector1 (ndarray): the first vector.
vector2 (ndarray): the (optional) second vector.
Returns:
np.array: The matrix |v1><v2|.
"""
if vector2 is None:
vector2 = np.array(vector1).conj()
else:
vector2 = np.array(vector2).conj()
return np.outer(vector1, vector2)
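# Illustrative example (added for clarity): outer() builds a projector from a
# state vector, and chop() removes spurious numerical noise from the result.
def _demo_outer_chop():
    plus = np.array([1.0, 1.0]) / np.sqrt(2)
    proj = outer(plus)                                   # projector |+><+|
    noisy = proj + 1e-14j * np.eye(2)                    # tiny spurious imaginary parts
    return chop(noisy)                                   # imaginary noise below 1e-10 is zeroed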
###############################################################
# Random Matrices.
###############################################################
def random_unitary_matrix(length):
"""
Return a random unitary ndarray.
Args:
length (int): the length of the returned unitary.
Returns:
ndarray: U (length, length) unitary ndarray.
"""
return unitary_group.rvs(length)
def random_density_matrix(length, rank=None, method='Hilbert-Schmidt'):
"""
Generate a random density matrix rho.
Args:
length (int): the length of the density matrix.
rank (int or None): the rank of the density matrix. The default
value is full-rank.
method (string): the method to use.
'Hilbert-Schmidt': sample rho from the Hilbert-Schmidt metric.
'Bures': sample rho from the Bures metric.
Returns:
ndarray: rho (length, length) a density matrix.
Raises:
QISKitError: if the method is not valid.
"""
if method == 'Hilbert-Schmidt':
return __random_density_hs(length, rank)
elif method == 'Bures':
return __random_density_bures(length, rank)
else:
raise QISKitError('Error: unrecognized method {}'.format(method))
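# Illustrative sanity check (added for clarity): a randomly drawn density matrix
# should be Hermitian, positive semi-definite and have unit trace.
def _demo_random_density_matrix():
    rho = random_density_matrix(4, method='Bures')
    assert np.allclose(rho, rho.conj().T)                # Hermitian
    assert np.isclose(np.trace(rho), 1.0)                # unit trace
    assert np.all(np.linalg.eigvalsh(rho) > -1e-10)      # positive semi-definite
    return rho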
def __ginibre_matrix(nrow, ncol=None):
"""
Return a normally distributed complex random matrix.
Args:
nrow (int): number of rows in output matrix.
ncol (int): number of columns in output matrix.
Returns:
ndarray: A complex rectangular matrix where each real and imaginary
entry is sampled from the normal distribution.
"""
if ncol is None:
ncol = nrow
G = np.random.normal(size=(nrow, ncol)) + \
np.random.normal(size=(nrow, ncol)) * 1j
return G
def __random_density_hs(N, rank=None):
"""
Generate a random density matrix from the Hilbert-Schmidt metric.
Args:
N (int): the length of the density matrix.
rank (int or None): the rank of the density matrix. The default
value is full-rank.
Returns:
ndarray: rho (N, N), a density matrix.
"""
G = __ginibre_matrix(N, rank)
G = G.dot(G.conj().T)
return G / np.trace(G)
def __random_density_bures(N, rank=None):
"""
Generate a random density matrix from the Bures metric.
Args:
N (int): the length of the density matrix.
rank (int or None): the rank of the density matrix. The default
value is full-rank.
Returns:
ndarray: rho (N,N) a density matrix.
"""
P = np.eye(N) + random_unitary_matrix(N)
G = P.dot(__ginibre_matrix(N, rank))
G = G.dot(G.conj().T)
return G / np.trace(G)
###############################################################
# Measures.
###############################################################
def state_fidelity(state1, state2):
"""Return the state fidelity between two quantum states.
Either input may be a state vector, or a density matrix. The state
fidelity (F) for two density matrices is defined as:
F(rho1, rho2) = Tr[sqrt(sqrt(rho1).rho2.sqrt(rho1))] ^ 2
For two pure states the fidelity is given by
F(|psi1>, |psi2>) = |<psi1|psi2>|^2
Args:
state1 (array_like): a quantum state vector or density matrix.
state2 (array_like): a quantum state vector or density matrix.
Returns:
array_like: The state fidelity F(state1, state2).
"""
warnings.warn('The state_fidelity() function has moved to states not qi', DeprecationWarning)
return new_state_fidelity(state1, state2)
def purity(state):
"""Calculate the purity of a quantum state.
Args:
state (np.array): a quantum state
Returns:
float: purity.
"""
rho = np.array(state)
if rho.ndim == 1:
rho = outer(rho)
return np.real(np.trace(rho.dot(rho)))
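# Illustrative example (added for clarity): a pure state has purity 1, while the
# maximally mixed single-qubit state has purity 1/2.
def _demo_purity():
    pure = np.array([1.0, 0.0])                          # |0>
    mixed = 0.5 * np.eye(2)                              # maximally mixed qubit
    return purity(pure), purity(mixed)                   # approximately (1.0, 0.5)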
def concurrence(state):
"""Calculate the concurrence.
Args:
state (np.array): a quantum state (1x4 array) or a density matrix (4x4
array)
Returns:
float: concurrence.
Raises:
Exception: if attempted on more than two qubits.
"""
rho = np.array(state)
if rho.ndim == 1:
rho = outer(state)
if len(state) != 4:
raise Exception("Concurrence is only defined for a state of two qubits")
YY = np.fliplr(np.diag([-1, 1, 1, -1]))
A = rho.dot(YY).dot(rho.conj()).dot(YY)
w = la.eigh(A, eigvals_only=True)
w = np.sqrt(np.maximum(w, 0))
return max(0.0, w[-1] - np.sum(w[0:-1]))
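# Illustrative example (added for clarity): the Bell state is maximally
# entangled (concurrence 1), while a product state has zero concurrence.
def _demo_concurrence():
    bell = np.array([1.0, 0.0, 0.0, 1.0]) / np.sqrt(2)
    product = np.array([1.0, 0.0, 0.0, 0.0])             # |00>
    return concurrence(bell), concurrence(product)       # approximately (1.0, 0.0)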
def shannon_entropy(pvec, base=2):
"""
Compute the Shannon entropy of a probability vector.
The shannon entropy of a probability vector pv is defined as
$H(pv) = -\\sum_j pv[j] \\log_b(pv[j])$, where $0 \\log_b 0 = 0$.
Args:
pvec (array_like): a probability vector.
base (int): the base of the logarithm.
Returns:
float: The Shannon entropy H(pvec).
"""
# pylint: disable=missing-docstring
if base == 2:
def logfn(x):
return - x * np.log2(x)
elif base == np.e:
def logfn(x):
return - x * np.log(x)
else:
def logfn(x):
return -x * np.log(x) / np.log(base)
h = 0.
for x in pvec:
if 0 < x < 1:
h += logfn(x)
return h
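# Illustrative example (added for clarity): a fair coin carries exactly one bit
# of entropy, a biased coin strictly less.
def _demo_shannon_entropy():
    return shannon_entropy([0.5, 0.5]), shannon_entropy([0.9, 0.1])  # (1.0, ~0.47)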
def entropy(state):
"""
Compute the von-Neumann entropy of a quantum state.
Args:
state (array_like): a density matrix or state vector.
Returns:
float: The von-Neumann entropy S(rho).
"""
rho = np.array(state)
if rho.ndim == 1:
return 0
evals = np.maximum(np.linalg.eigvalsh(state), 0.)
return shannon_entropy(evals, base=np.e)
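# Illustrative example (added for clarity): pure states have zero von-Neumann
# entropy, while the maximally mixed qubit has entropy ln(2).
def _demo_entropy():
    pure = np.array([1.0, 0.0])
    mixed = 0.5 * np.eye(2)
    return entropy(pure), entropy(mixed)                 # approximately (0.0, 0.693)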
def mutual_information(state, d0, d1=None):
"""
Compute the mutual information of a bipartite state.
Args:
state (array_like): a bipartite state-vector or density-matrix.
d0 (int): dimension of the first subsystem.
d1 (int or None): dimension of the second subsystem.
Returns:
float: The mutual information S(rho_A) + S(rho_B) - S(rho_AB).
"""
if d1 is None:
d1 = int(len(state) / d0)
mi = entropy(partial_trace(state, [0], dimensions=[d0, d1]))
mi += entropy(partial_trace(state, [1], dimensions=[d0, d1]))
mi -= entropy(state)
return mi
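# Illustrative example (added for clarity): for a Bell pair S(A) = S(B) = ln(2)
# and S(AB) = 0, so the mutual information is 2 ln(2).
def _demo_mutual_information():
    bell = np.array([1.0, 0.0, 0.0, 1.0]) / np.sqrt(2)
    rho = np.outer(bell, bell.conj())
    return mutual_information(rho, 2)                    # approximately 1.386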
def entanglement_of_formation(state, d0, d1=None):
"""
Compute the entanglement of formation of quantum state.
The input quantum state must be either a bipartite state vector, or a
2-qubit density matrix.
Args:
state (array_like): (N) array_like or (4,4) array_like, a
bipartite quantum state.
d0 (int): the dimension of the first subsystem.
d1 (int or None): the dimension of the second subsystem.
Returns:
float: The entanglement of formation.
"""
state = np.array(state)
if d1 is None:
d1 = int(len(state) / d0)
if state.ndim == 2 and len(state) == 4 and d0 == 2 and d1 == 2:
return __eof_qubit(state)
elif state.ndim == 1:
# trace out largest dimension
if d0 < d1:
tr = [1]
else:
tr = [0]
state = partial_trace(state, tr, dimensions=[d0, d1])
return entropy(state)
else:
print('Input must be a state-vector or 2-qubit density matrix.')
return None
def __eof_qubit(rho):
"""
Compute the Entanglement of Formation of a 2-qubit density matrix.
Args:
rho (array_like): (4, 4) array_like, input density matrix.
Returns:
float: The entanglement of formation.
"""
c = concurrence(rho)
c = 0.5 + 0.5 * np.sqrt(1 - c * c)
return shannon_entropy([c, 1 - c])
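# Illustrative example (added for clarity): a two-qubit Bell state carries one
# ebit, i.e. an entanglement of formation of 1 (in bits).
def _demo_entanglement_of_formation():
    bell = np.outer([1.0, 0.0, 0.0, 1.0], [1.0, 0.0, 0.0, 1.0]) / 2.0
    return entanglement_of_formation(bell, 2)            # approximately 1.0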
###############################################################
# Other.
###############################################################
def is_pos_def(x):
"""Return is_pos_def."""
return np.all( | np.linalg.eigvals(x) | numpy.linalg.eigvals |
# -------------------------------------------------------------------------------
# Module Name: LMS Algorithm (offline)
# Author m_tsutsui
# -------------------------------------------------------------------------------
# Library_Import#############################
from numpy import*
import math
import numpy as np
import matplotlib.pyplot as plt
# Library_Import_end##########################
def lms_off(myu, update, samp_n):
"""
myu: step size, update: number of update iterations,
samp_n: index of the desired-signal sample.
"""
w = np.random.rand(d_size, 1) # initial coefficient
for n in np.arange(1, update, 1):
# Equivalent matrix form:
# w = (np.eye(d_size, d_size) - np.array(myu) * np.matrix(R)) * \
#     np.matrix(w) + np.array(myu) * d[samp_n, 0] * np.matrix(x)
w = np.dot((np.eye(d_size, d_size) - np.dot(np.array(myu), np.array(R))), np.array(w)) \
+ np.dot(np.dot(np.array(myu), d[samp_n, 0]), np.array(x))
return w
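# Illustrative sketch only (added for clarity): lms_off() relies on module-level
# globals (d_size, R, d, x) that are defined elsewhere in the original script.
# The hypothetical setup below shows one way to populate them for a toy problem.
def _demo_lms_off():
    global d_size, R, d, x
    d_size = 4                                           # filter length (assumed)
    x = np.random.rand(d_size, 1)                        # input vector (assumed)
    R = np.dot(x, x.T)                                   # input autocorrelation estimate
    d = np.ones((1, 1))                                  # desired-signal samples (toy)
    return lms_off(myu=0.1, update=100, samp_n=0)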
import json
import numpy
from tqdm import tqdm
from llg.functions import heun
from llg.functions import energy
from llg.system import System
from llg.bucket import Bucket
import random
def get_random_state(num_sites):
random_state = numpy.random.normal(size=(num_sites, 3))
norms = numpy.linalg.norm(random_state, axis=1)
random_state = [vec / norms[i] for i, vec in enumerate(random_state)]
return random_state
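# Illustrative check (added for clarity): every generated spin should be a unit
# vector after the normalisation above.
def _demo_get_random_state():
    state = numpy.array(get_random_state(num_sites=10))
    return numpy.allclose(numpy.linalg.norm(state, axis=1), 1.0)   # expected True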
class Simulation:
"""This is a class for make a simulation in order to evolve the state of the system.
:param system: Object that contains index, position, type_, mu,
anisotropy_constant, anisotopy_axis and field_axis (geometry). Also it contains a
source, target, and jex (neighbors). Finally it contains units, damping,
gyromagnetic, and deltat.
:type system:
:param temperature: The temperature of the sites in the system.
:type temperature: float/list
:param field: The field that acts under the sites in the system.
:type field: float/list
:param num_iterations: The number of iterations for evolve the system.
:type num_iterations: int
:param seed: The seed for the random state.
:type seed: int
:param initial_state: The initial state of the sites in the system.
:type initial_state: list
"""
def __init__(
self,
system,
temperature: Bucket,
field: Bucket,
num_iterations=None,
seed=None,
initial_state=None,
):
"""
The constructor for Simulation class.
"""
self.system = system
self.temperature = temperature
self.field = field
self.seed = seed
if num_iterations:
self.num_iterations = num_iterations
else:
self.num_iterations = 1000
if seed:
self.seed = seed
else:
self.seed = random.getrandbits(32)
numpy.random.seed(self.seed)
import logging
import re
import glob
import os
from typing import Dict, Optional, Tuple, List
from pathlib import Path
import numpy as np
from scipy.fftpack import next_fast_len
from draco.util import tools
from draco.core.containers import FrequencyStackByPol, MockFrequencyStackByPol
from . import utils
logger = logging.getLogger(__name__)
class SignalTemplate:
"""Create signal templates from pre-simulated modes and input parameters.
Parameters
----------
derivs
A dictionary of derivates expected, giving their name (key), and a tuple of the
parameter difference used in the simulations (between the perturbed sim and the
base values) and the fiducial value of the parameter.
factor
A scaling factor to apply to the sims. Unfortunately some of the sims were
generated in mK rather than K, so the default value (`1e-3`) will scale the
templates into Kelvin.
aliases
Allow the parameters to be given by more meaningful names.
"""
def __init__(
self,
derivs: Optional[Dict[str, Tuple[float, float]]] = None,
factor: float = 1e-3,
aliases: Optional[Dict[str, str]] = None,
):
if derivs is None:
derivs = {
"NL": (0.3, 1.0),
"FoGh": (0.2, 1.0),
"FoGg": (0.2, 1.0),
}
self._derivs = derivs
self._factor = factor
self._aliases = aliases if aliases is not None else {}
@classmethod
def load_from_stackfiles(
cls,
pattern: str,
pol: List[str] = None,
weight: np.ndarray = None,
combine: bool = True,
sort: bool = True,
**kwargs,
):
"""Load the signal template from a set of stack files.
This will load the stack files from each location and try and compile them into
a set which can be used to generate signal templates.
Parameters
----------
pattern
A glob pattern that isolates the base signal templates.
pol
The desired polarisations.
weight
The weight to use when averaging over polarisations.
Must have shape [npol, nfreq]. Only relevant if combine is True.
combine
Add an element to the polarisation axis called I that
is the weighted sum of the XX and YY polarisation.
sort
Sort the frequency offset axis in ascending order.
**kwargs
Arguments passed on to the constructor.
"""
dirs = glob.glob(pattern)
matching = {}
# Find directories which match the right format
for d in sorted(dirs):
mo = re.search(r"_compderiv-([^\/]+)", d)
if mo is None:
print(f"Directory {d} does not match expected format, rejecting")
continue
key = mo.group(1)
if key in matching:
raise ValueError(
"Did not find a unique set of modes at this location. "
"You might need to refine the pattern."
)
d = Path(d)
if not d.is_dir():
raise ValueError("Glob must point to directories")
matching[key] = Path(d)
# For each directory load all the stacking files and combine them
stacks = {}
for key, d in matching.items():
stack_files = sorted(list(d.glob("*.h5")))
if len(stack_files) == 0:
print("No files found at matching path.")
continue
mocks = utils.load_mocks(stack_files, pol=pol)
mocks.weight[:] = weight[np.newaxis, :] if weight is not None else 1.0
stacks[key] = utils.average_stacks(
mocks, pol=mocks.pol, combine=combine, sort=sort
)
# Create the object and try and construct all the required templates from the
# stacks
self = cls(**kwargs)
self._interpret_stacks(stacks)
return self
def _interpret_stacks(self, stacks: Dict[str, FrequencyStackByPol]):
# Generate the required templates from the stacks
# Find all entries that have the linear component structure
compterms = [k.split("-")[1] for k in stacks.keys() if k.startswith("00")]
stack_modes = {}
# Get the first frequency axis as a reference
self._freq = next(iter(stacks.values())).freq[:].copy()
self._freq.flags.writeable = False
def _check_load_stack(key):
# Validate the stack and extract the template and its variance
if key not in stacks:
raise RuntimeError(f"Stack {key} was not loaded.")
stack = stacks[key]
if not np.array_equal(stack.freq[:], self._freq):
raise RuntimeError(
f"Frequencies in stack {key} do not match reference."
)
return (
self._factor * stack.stack[:],
self._factor ** 2
* tools.invert_no_zero(stack.attrs["num"] * stack.weight[:]),
)
# For all linear component terms load them and construct the various HI,g,v
# combination terms
for term in compterms:
s00, v00 = _check_load_stack(f"00-{term}")
s01, v01 = _check_load_stack(f"01-{term}")
s10, v10 = _check_load_stack(f"10-{term}")
s11, v11 = _check_load_stack(f"11-{term}")
template_mean = np.zeros((4,) + s00.shape)
template_var = np.zeros((4,) + s00.shape)
# Calculate the template for each component
template_mean[0] = s11 - s10 - s01 + s00 # Phg
template_mean[1] = s10 - s00 # Phv
template_mean[2] = s01 - s00 # Pvg
template_mean[3] = s00 # Pvv
# Calculate the variance of each component
template_var[0] = v11 + v10 + v01 + v00
template_var[1] = v10 + v00
template_var[2] = v01 + v00
template_var[3] = v00
stack_modes[term] = (template_mean, template_var)
self._stack_comp = {}
self._stack_noncomp = {}
self._stack_comp["base"] = stack_modes["base"]
# For the expected derivative modes combine the perturbed entry and the base
# templates to get the derivative templates
for name, (delta, _) in self._derivs.items():
if name not in stack_modes:
raise RuntimeError(f"Expected derivative {name} but could not load it.")
s, v = stack_modes[name]
sb, vb = stack_modes["base"]
# Calculate the finite difference derivative
fd_mode = (s - sb) / delta
fd_var = (v + vb) / delta ** 2
self._stack_comp[name] = (fd_mode, fd_var)
# Load any non-component type terms. These are terms which sit outside the usual
# bias and Kaiser factors (such as shot noise)
noncompterms = [k for k in stacks.keys() if "-" not in k]
for term in noncompterms:
self._stack_noncomp[term] = _check_load_stack(term)
def signal(
self, *, omega: float, b_HI: float, b_g: float, **kwargs: float
) -> np.ndarray:
"""Return the signal template for the given parameters.
Parameters
----------
omega
Overall scaling.
b_HI
Scaling for the HI bias term.
b_g
Scaling for tracer bias term.
**kwargs
Values for all other derivative terms (e.g. NL) and non-component terms
(e.g. shotnoise).
Returns
-------
signal
Signal template for the given parameters. An array of [pol, freq offset].
"""
def _combine(vec):
# Combine the bias terms and templates to get a new template
return b_HI * b_g * vec[0] + b_HI * vec[1] + b_g * vec[2] + vec[3]
# Generate the signal for the base model
signal = _combine(self._stack_comp["base"][0])
# Add in any derivative contributions
for name, (_, x0) in self._derivs.items():
stack = _combine(self._stack_comp[name][0])
name = self._aliases.get(name, name)
if name not in kwargs:
raise ValueError(f"Need a value for deriv parameter {name}")
x = kwargs[name]
signal += stack * (x - x0)
# Convolve signal with a kernel
# before adding in the non-component contributions
signal = self.convolve_pre_noncomp(signal, **kwargs)
# Add in any non-component contributins
for name, stack in self._stack_noncomp.items():
name = self._aliases.get(name, name)
if name not in kwargs:
raise ValueError(f"Need a value for non-comp parameter {name}")
x = kwargs[name]
signal += stack[0] * x
# Convolve signal with a kernel
# after adding in the non-component contributions
signal = self.convolve_post_noncomp(signal, **kwargs)
# Scale by the overall prefactor
signal *= omega
return signal
def convolve_pre_noncomp(self, signal: np.ndarray, **kwargs) -> np.ndarray:
"""Override in subclass to convolve signal with kernel pre-non-components."""
return signal
def convolve_post_noncomp(self, signal: np.ndarray, **kwargs) -> np.ndarray:
"""Override in subclass to convolve signal with kernel post-non-components."""
return signal
@property
def freq(self):
"""Get the frequency separations the template is defined at."""
return self._freq
@property
def params(self):
"""The names of all the parameters needed to generate the template."""
return (
["omega", "b_HI", "b_g"]
+ [self._aliases.get(name, name) for name in self._stack_comp.keys()]
+ [self._aliases.get(name, name) for name in self._stack_noncomp.keys()]
)
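# Illustrative sketch (added for clarity, with a hypothetical glob pattern): build
# a template from a set of stack files and evaluate it at one parameter point.
# The extra keyword names accepted by signal() depend on which derivative and
# non-component stacks are actually found on disk (see `template.params`).
def _example_signal_template():
    template = SignalTemplate.load_from_stackfiles(
        "/path/to/sims_compderiv-*",                     # placeholder location
        pol=["XX", "YY"],
    )
    model = template.signal(
        omega=1.0, b_HI=1.0, b_g=1.0,
        NL=1.0, FoGh=1.0, FoGg=1.0,                      # derivative parameters at fiducial values
        # plus one value per non-component stack found, e.g. shotnoise=...
    )
    return template.freq, model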
class SignalTemplateFoG(SignalTemplate):
"""Create signal templates from pre-simulated modes and input parameters.
Reconvolves the stacked signal with a kernel to simulate FoG damping,
in contrast to the SignalTemplate class that uses a linear model for
the FoG damping.
Parameters
----------
derivs
A dictionary of derivates expected, giving their name (key), and a tuple of the
parameter difference used in the simulations (between the perturbed sim and the
base values) and the fiducial value of the parameter.
convolutions
A dictionary of the expected convolution parameters, giving their name (key),
and a tuple of the parameter difference used in the simulations (between the
perturbed sim and the base values) and the fiducial value of the parameter.
delay_range
The lower and upper boundary of the delay in micro-seconds that will
be used to fit for the effective scale of the base convolution kernel.
Defaults to (0.25, 0.80) micro-seconds.
"""
def __init__(
self,
derivs: Optional[Dict[str, Tuple[float, float]]] = None,
convolutions: Optional[Dict[str, Tuple[float, float]]] = None,
delay_range: Optional[Tuple[float, float]] = None,
*args,
**kwargs,
):
if derivs is None:
derivs = {
"NL": (0.3, 1.0),
}
if convolutions is None:
convolutions = {
"FoGh": (0.2, 1.0),
"FoGg": (0.2, 1.0),
}
if delay_range is None:
delay_range = (0.25, 0.8)
self._convolutions = convolutions
self._delay_range = delay_range
super().__init__(derivs=derivs, *args, **kwargs)
def _solve_scale(
self, base: FrequencyStackByPol, deriv: FrequencyStackByPol, alpha: float
) -> np.ndarray:
"""Solve for the effective scale of the FoG damping.
Parameters
----------
base
Stacked signal from simulations with the base parameters.
deriv
Stacked signal from simulations with the FoG parameter perturbed.
alpha
The ratio of the FoG parameter for deriv relative to base.
Returns
-------
scale : np.ndarray[npol,]
The effective scale of the transfer function:
H(\tau) = 1 / (1 + (scale * \tau)^2)
"""
nfreq = self.freq.size
df = np.abs(self.freq[1] - self.freq[0])
tau = np.fft.rfftfreq(nfreq, d=df)[np.newaxis, :]
tau2 = tau ** 2
mu_fft_base = np.abs(np.fft.rfft(base.stack[:], nfreq, axis=-1))
mu_fft_deriv = np.abs(np.fft.rfft(deriv.stack[:], nfreq, axis=-1))
var_fft_base = np.sum(
tools.invert_no_zero(base.attrs["num"] * base.weight[:]),
axis=-1,
keepdims=True,
)
var_fft_deriv = np.sum(
tools.invert_no_zero(deriv.attrs["num"] * deriv.weight[:]),
axis=-1,
keepdims=True,
)
ratio = mu_fft_base * tools.invert_no_zero(mu_fft_deriv)
var_ratio = ratio ** 2 * (
var_fft_base * tools.invert_no_zero(mu_fft_base ** 2)
+ var_fft_deriv * tools.invert_no_zero(mu_fft_deriv ** 2)
)
y = (ratio - 1.0) * tools.invert_no_zero(alpha ** 2 - ratio)
w = (alpha ** 2 - ratio) ** 4 * tools.invert_no_zero(
(alpha ** 2 - 1.0) ** 2 * var_ratio
)
w *= ((tau >= self._delay_range[0]) & (tau <= self._delay_range[1])).astype(
np.float32
)
scale2 = np.sum(w * tau2 * y, axis=-1) * tools.invert_no_zero(
np.sum(w * tau2 ** 2, axis=-1)
)
return np.sqrt(scale2)
def _interpret_stacks(self, stacks: Dict[str, FrequencyStackByPol]):
super()._interpret_stacks(stacks)
base = stacks["11-base"]
self._convolution_scale = {}
for name, (delta, x0) in self._convolutions.items():
key = f"11-{name}"
alpha = (x0 + delta) / x0
if key not in stacks:
raise RuntimeError(f"Expected derivative {name} but could not load it.")
# Determine the effective scale
scale = self._solve_scale(base, stacks[key], alpha)
self._convolution_scale[name] = scale
def convolve_pre_noncomp(self, signal: np.ndarray, **kwargs) -> np.ndarray:
"""Convolve the stacked signal with the relative FoG kernel.
Parameters
----------
signal : np.ndarray[npol, nfreq]
The stacked signal before adding the non-component contributions.
kwargs : dict
All parameter values.
Returns
-------
signal : np.ndarray[npol, nfreq]
The input stacked signal after convolving with the relative FoG kernel.
"""
# Figure out the size needed to perform the convolution
nfreq = self.freq.size
fsize = next_fast_len(nfreq)
fslice = slice(0, nfreq)
# Determine the delay axis
df = np.abs(self.freq[1] - self.freq[0])
tau = np.fft.rfftfreq(fsize, d=df)