prompt (string, 19-879k chars) | completion (string, 3-53.8k chars) | api (string, 8-59 chars)
---|---|---
from math import pi
from math import sqrt
from compas.geometry import Box
from compas.geometry import Point
from compas.geometry import Frame
from compas.geometry import matrix_from_frame
from compas.geometry import matrix_inverse
from compas import PRECISION
class VolBox(object):
"""A volumetric box is defined by a base box from `compas.geometry` and an optional fillet radius.
The center of the volumetric box is positioned at the origin of the
coordinate system defined by the frame. The box is axis-aligned to the frame.
Parameters
----------
box : :class:`compas.geometry.Box`
The base box.
radius : float
        The filleting radius along the edges. Default is 0.
Examples
--------
>>> from compas.geometry import Frame
>>> from compas.geometry import Box
>>> from compas_vol.primitives import VolBox
>>> box = Box(Frame.worldXY(), 1.0, 2.0, 3.0)
>>> vbx = VolBox(box, 0.3)
"""
def __init__(self, box, radius=0.0):
self._box = None
self.inversetransform = None
self._radius = None
self.box = box
self.radius = max(radius, 0)
@property
def box(self):
return self._box
@box.setter
def box(self, box):
if not isinstance(box, Box):
            raise ValueError('box must be an instance of compas.geometry.Box')
self._box = box
transform = matrix_from_frame(self.box.frame)
self.inversetransform = matrix_inverse(transform)
@property
def radius(self):
return self._radius
@radius.setter
def radius(self, radius):
self._radius = float(radius)
@property
def volume(self):
if self.radius == 0:
return self.box.volume
else:
xr = self.box.xsize - 2 * self.radius
yr = self.box.ysize - 2 * self.radius
zr = self.box.zsize - 2 * self.radius
# box without the radius layer
inner = xr * yr * zr
# sides
sides = Box.from_width_height_depth(xr, yr, zr).area * self.radius
# cylinder along all edges, base circle x height
edges = (self.radius**2 * pi) * (xr + yr + zr)
            # eight corners, each 1/8th of a sphere = 1 full sphere
corns = 4./3. * pi * self.radius**3
return inner + sides + edges + corns
@property
def data(self):
return {'box': self.box.data,
'radius': self.radius}
def to_data(self):
return {'type': self.__class__.__name__,
'content': self.data}
@data.setter
def data(self, data):
self.box = Box.from_data(data['box'])
self.radius = data['radius']
def __repr__(self):
return 'VolBox({0}, {1:.{2}f})'.format(str(self.box), self.radius, PRECISION[:1])
# ==========================================================================
# factory
# ==========================================================================
@classmethod
def from_data(cls, data):
"""Construct a volumetric box from its data representation.
Parameters
----------
data : :obj:`dict`
The data dictionary.
Returns
-------
VolBox
The constructed box.
Examples
--------
>>>
"""
box = Box.from_data(data['box'])
vbox = cls(box, data['radius'])
return vbox
def get_distance(self, point):
"""
single point distance function
Parameters
----------
point: :class:`compas.geometry.Point`
            The point in R^3 space to query for its distance.
Returns
-------
float
The distance from the query point to the surface of the object.
"""
if not isinstance(point, Point):
p = Point(*point)
else:
p = point
p.transform(self.inversetransform)
dx = abs(p.x) - (self.box.xsize / 2.0 - self.radius)
dy = abs(p.y) - (self.box.ysize / 2.0 - self.radius)
dz = abs(p.z) - (self.box.zsize / 2.0 - self.radius)
inside = max(dx, max(dy, dz)) - self.radius
dx = max(dx, 0)
dy = max(dy, 0)
dz = max(dz, 0)
if inside + self.radius < 0:
return inside
else:
corner = sqrt(dx * dx + dy * dy + dz * dz) - self.radius
return corner
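    # Example (hedged sketch): querying the scalar signed distance of a point.
    # Negative values lie inside the (rounded) box, positive values outside.
    #
    #     vbx = VolBox(Box(Frame.worldXY(), 1.0, 2.0, 3.0), 0.1)
    #     d = vbx.get_distance(Point(2.0, 0.0, 0.0))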
def get_distance_numpy(self, x, y, z):
"""
vectorized distance function
Parameters
----------
x,y,z: `numpy arrays, np.ogrid[]`
            The coordinates of all the points in R^3 space to query for their distances.
The shapes are ``x: (nx, 1, 1), y: (1, ny, 1), z: (1, 1, nz)``
Returns
-------
numpy array of floats, shape (nx, ny, nz)
The distances from the query points to the surface of the object.
"""
import numpy as np
p = np.array([x, y, z, 1], dtype=object)
xt, yt, zt, _ = np.dot(self.inversetransform, p)
dx = np.abs(xt) - (self.box.xsize / 2.0 - self.radius)
dy = np.abs(yt) - (self.box.ysize / 2.0 - self.radius)
dz = np.abs(zt) - (self.box.zsize / 2.0 - self.radius)
inside = np.maximum(dx, np.maximum(dy, dz)) - self.radius
dx = np.maximum(dx, 0)
dy = np.maximum(dy, 0)
        dz = np.maximum(dz, 0)
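        # NOTE: the original snippet is truncated here; the lines below are a hedged
        # sketch of the remainder, mirroring the scalar get_distance() above.
        corner = np.sqrt(dx * dx + dy * dy + dz * dz) - self.radius
        return np.where(inside + self.radius < 0, inside, corner)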
#!/usr/bin/env python3
#
# Evolutionary Algorithms
import os
import time
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def check_dir(directory):
"""
:param directory: path to the directory
"""
os.makedirs(directory, exist_ok=True)
def sphere_test(data):
"""
:param data:
:return:
"""
f_x = np.sum(np.square(data), axis=-1)
return f_x
def rastrigin_test(data, A=10):
"""
:param data:
:param A:
:return:
"""
    n = data.shape[-1]  # number of dimensions (last axis), so the 2D contour grid also works
cos = np.cos(2 * np.pi * data)
e1 = np.square(data) - np.multiply(A, cos)
e2 = np.sum(e1, axis=-1)
    return A * n + e2
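# Example (hedged): evaluating the test functions on a small population.
# Both functions return one fitness value per candidate solution (row).
#
#     pop = np.random.uniform(-5, 5, (4, 2))
#     sphere_test(pop)      # shape (4,)
#     rastrigin_test(pop)   # shape (4,)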
def plot_2d_contour(obj_function):
"""
:param obj_function:
"""
x = np.linspace(-5, 5, 100)
y = np.linspace(-5, 5, 100)
X, Y = np.meshgrid(x, y)
data = np.dstack((X, Y))
S = obj_function(data)
plt.contour(X, Y, S)
def plot_fitness(out_dir, name, algo_name, x, y1, y2, title):
"""
    (d) For each test function, plot the best and the worst fitness for each generation (averaged over 3 runs).
:param name:
:param x:
:param y1:
:param y2:
:param title:
"""
plt.figure()
plt.grid()
# Let x-axis be the generations and y-axis be the fitness values.
plt.plot(x, y1, label='avg_' + name.lower() + '_max')
plt.plot(x, y2, label='avg_' + name.lower() + '_min')
plt.xlabel('generations', fontsize=11)
plt.ylabel('fitness values', fontsize=11)
plt.gca().set_ylim(bottom=-70)
plt.annotate(round(y1[-1], 2), xy=(x[-1], y1[-1]), xycoords='data',
xytext=(-40, 15), size=10, textcoords='offset points',
arrowprops=dict(arrowstyle="->",
connectionstyle="angle,angleA=0,angleB=90,rad=10"),
)
plt.annotate(round(y2[-1], 2), xy=(x[-1], y2[-1]), xycoords='data',
xytext=(-40, 15), textcoords='offset points',
arrowprops=dict(arrowstyle="->",
connectionstyle="angle,angleA=0,angleB=90,rad=10"),
)
plt.legend()
plt.title(algo_name + '\n' + title, weight='bold', fontsize=12)
plt.savefig(out_dir + 'fitness.pdf')
plt.close()
def plot_generation(out_dir, name, i, iteration, min, obj_fun, sample):
"""
:param i:
:param iteration:
:param min:
:param obj_fun:
:param sample:
:return:
"""
if i % (iteration / 10) == 0:
plt.figure(1)
plt.clf()
plot_2d_contour(obj_fun)
plt.plot(sample[:, 0], sample[:, 1], 'ko')
plt.xlim([-5, 5])
plt.ylim([-5, 5])
plt.title(name.upper() + '\ngeneration: ' + str(i + 1) + '\nmin: ' + str(min[i]))
# plt.pause(0.1)
plt.savefig(out_dir + name + '-generation-contour-' + str(i) + '.pdf')
plt.close()
def cem(obj_fun, dim_domain, population_size, elite_set_ratio, learning_rate, iteration, out_dir, name, plot_generations):
"""
:param dim_domain:
:param population_size:
:param elite_set_ratio:
:param obj_fun:
:param iter:
:return mean:
"""
# Initialise parameters
# Note that you can uniformly sample the initial population parameters as long as they are reasonably far from
# the global optimum.
mean = np.random.uniform(-5, 5, dim_domain)
variance = np.random.uniform(4, 5, dim_domain)
max = np.zeros(iteration)
min = np.zeros(iteration)
for i in range(iteration):
        # Obtain n samples from a normal distribution
sample = np.random.normal(mean, variance, [population_size, dim_domain])
        # Evaluate the objective function on the sampled population
fitness = obj_fun(sample)
min[i] = np.min(fitness)
max[i] = np.max(fitness)
        # Sort samples by fitness in ascending order, so the best (lowest) candidates come first
idx = np.argsort(fitness)
fittest = sample[idx]
# Elite set
        p = np.rint(population_size * elite_set_ratio).astype(int)
elite = fittest[:p]
# PLOT
if plot_generations:
plot_generation(out_dir, name, i, iteration, min, obj_fun, sample)
# Refit a new Gaussian distribution from the elite set
mean = np.mean(elite, axis=0)
variance = np.std(elite, axis=0)
# Return mean of final sampling distribution as solution
return mean, min, max
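# Example (hedged sketch): running CEM on the sphere function. The argument values
# below are illustrative assumptions, not taken from the original assignment.
#
#     check_dir('out/')
#     mean, mins, maxs = cem(sphere_test, dim_domain=2, population_size=30,
#                            elite_set_ratio=0.2, learning_rate=0.1, iteration=100,
#                            out_dir='out/', name='sphere', plot_generations=False)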
def nes(obj_fun, dim_domain, population_size, elite_set_ratio, learning_rate, iteration, out_dir, name, plot_generations):
"""
:param dim_domain:
:param population_size:
:param obj_fun:
:param iter:
:return mean:
"""
# Initialise parameters
mean = np.random.uniform(-5, 5, dim_domain)
# variance = np.full(dim_domain, 1)
variance = np.random.uniform(4, 5, dim_domain)
max = np.zeros(iteration)
min = np.zeros(iteration)
for i in range(iteration):
        # Obtain n samples from a normal distribution
sample = np.random.normal(mean, variance, [population_size, dim_domain])
        # Evaluate the objective function on the sampled population
fitness = obj_fun(sample)
min[i] = np.min(fitness)
max[i] = np.max(fitness)
# Calculate the log derivatives
log_derivative_mu = (sample - mean) / (variance ** 2)
log_derivative_sigma = ((sample - mean) ** 2 - (variance ** 2)) / variance ** 3
J_gradient_mu = np.sum(fitness[..., np.newaxis] * log_derivative_mu, axis=0) / sample.shape[0]
J_gradient_sigma = np.sum(fitness[..., np.newaxis] * log_derivative_sigma, axis=0) / sample.shape[0]
F_mu = np.matmul(log_derivative_mu.T, log_derivative_mu) / sample.shape[0]
F_sigma = np.matmul(log_derivative_sigma.T, log_derivative_sigma) / sample.shape[0]
# PLOT
if plot_generations:
plot_generation(out_dir, name, i, iteration, min, obj_fun, sample)
# Update mean and variance
        # NOTE: the call below was truncated in the original after np.linalg.inv(F_mu);
        # the gradient argument and closing parenthesis are assumed.
        mean = mean - learning_rate * np.matmul(np.linalg.inv(F_mu), J_gradient_mu)
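        # NOTE: the original snippet ends at the mean update above. The variance update
        # and the return below are a hedged sketch of the corresponding natural-gradient
        # step; they are assumptions, not the original code.
        variance = variance - learning_rate * np.matmul(np.linalg.inv(F_sigma), J_gradient_sigma)
        variance = np.abs(variance)  # assumption: keep the scale parameter positive
    # Return mean of final sampling distribution as solution
    return mean, min, max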
from unittest import TestCase
import unittest
import numpy as np
import active_subspaces.utils.qp_solver as qp
class TestGurobi(TestCase):
def test_gurobi_linear_program_ineq(self):
c = np.ones((2,1))
A = np.array([[1.0,0.0],[0.0,1.0],[1.0,1.0]])
b = np.array([[0.1],[0.1],[0.1]])
gs = qp.QPSolver()
x = gs.linear_program_ineq(c, A, b)
xtrue = np.array([0.1,0.1]).reshape((2,1))
np.testing.assert_almost_equal(x,xtrue)
def test_gurobi_linear_program_eq(self):
c = np.ones((5,1))
A = np.array([[2.0,1.0,0.,0.,0.],[0.,0.,2.0,1.0,0.]])
b = np.array([[0.5],[0.5]])
lb, ub = -np.ones((5,1)), np.ones((5,1))
gs = qp.QPSolver()
x = gs.linear_program_eq(c, A, b, lb, ub)
xtrue = np.array([0.75,-1.0,0.75,-1.0,-1.0]).reshape((5,1))
np.testing.assert_almost_equal(x,xtrue)
def test_gurobi_quadratic_program_bnd(self):
c = np.ones((5,1))
Q = np.eye(5)
lb, ub = -np.ones((5,1)), np.ones((5,1))
gs = qp.QPSolver()
x = gs.quadratic_program_bnd(c, Q, lb, ub)
xtrue = -0.5*np.ones((5,1))
np.testing.assert_almost_equal(x,xtrue)
def test_gurobi_quadratic_program_ineq(self):
c = np.ones((5,1))
Q = np.eye(5)
A = np.array([[1.,0.,0.,0.,0.],[0.,1.,0.,0.,0.]])
b = np.array([[-1.0],[-1.0]])
gs = qp.QPSolver()
x = gs.quadratic_program_ineq(c, Q, A, b)
xtrue = -0.5*np.ones((5,1))
np.testing.assert_almost_equal(x,xtrue)
def test_scipy_linear_program_ineq(self):
c = np.ones((2,1))
A = np.array([[1.0,0.0],[0.0,1.0],[1.0,1.0]])
b = np.array([[0.1],[0.1],[0.1]])
gs = qp.QPSolver(solver='SCIPY')
x = gs.linear_program_ineq(c, A, b)
xtrue = np.array([0.1,0.1]).reshape((2,1))
np.testing.assert_almost_equal(x,xtrue)
def test_scipy_linear_program_eq(self):
c = np.ones((5,1))
A = np.array([[2.0,1.0,0.,0.,0.],[0.,0.,2.0,1.0,0.]])
b = np.array([[0.5],[0.5]])
lb, ub = -np.ones((5,1)), np.ones((5,1))
gs = qp.QPSolver(solver='SCIPY')
x = gs.linear_program_eq(c, A, b, lb, ub)
xtrue = np.array([0.75,-1.0,0.75,-1.0,-1.0]).reshape((5,1))
np.testing.assert_almost_equal(x,xtrue)
def test_scipy_quadratic_program_bnd(self):
c = np.ones((5,1))
Q = np.eye(5)
lb, ub = -np.ones((5,1)), np.ones((5,1))
gs = qp.QPSolver(solver='SCIPY')
x = gs.quadratic_program_bnd(c, Q, lb, ub)
xtrue = -0.5*np.ones((5,1))
np.testing.assert_almost_equal(x,xtrue)
def test_scipy_quadratic_program_ineq(self):
c = np.ones((5,1))
        Q = np.eye(5)
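        # NOTE: truncated in the original; the remainder below mirrors the Gurobi
        # variant of this test and is a hedged reconstruction.
        A = np.array([[1.,0.,0.,0.,0.],[0.,1.,0.,0.,0.]])
        b = np.array([[-1.0],[-1.0]])
        gs = qp.QPSolver(solver='SCIPY')
        x = gs.quadratic_program_ineq(c, Q, A, b)
        xtrue = -0.5*np.ones((5,1))
        np.testing.assert_almost_equal(x,xtrue)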
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides classes to perform fitting of molecules with arbitrary
atom order.
This module is supposed to perform exact comparisons without the atom order
correspondence prerequisite, while molecule_structure_comparator is supposed
to do rough comparisons with the atom order correspondence prerequisite.
The implementation is based on an excellent python package called `rmsd` that
you can find at https://github.com/charnley/rmsd.
"""
__author__ = "<NAME>, <NAME>"
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
__date__ = "Aug 21, 2020"
import abc
import copy
import itertools
import logging
import math
import re
import numpy as np
from monty.dev import requires
from monty.json import MSONable
try:
from openbabel import openbabel as ob
from pymatgen.io.babel import BabelMolAdaptor
except ImportError:
ob = None
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist
from pymatgen.core.structure import Molecule # pylint: disable=ungrouped-imports
logger = logging.getLogger(__name__)
class AbstractMolAtomMapper(MSONable, metaclass=abc.ABCMeta):
"""
Abstract molecular atom order mapping class. A mapping will be able to
find the uniform atom order of two molecules that can pair the
geometrically equivalent atoms.
"""
@abc.abstractmethod
def uniform_labels(self, mol1, mol2):
"""
Pair the geometrically equivalent atoms of the molecules.
Args:
mol1: First molecule. OpenBabel OBMol or pymatgen Molecule object.
mol2: Second molecule. OpenBabel OBMol or pymatgen Molecule object.
Returns:
(list1, list2) if uniform atom order is found. list1 and list2
            are for mol1 and mol2, respectively. Their lengths equal
            the number of atoms. They represent the uniform atom order
of the two molecules. The value of each element is the original
atom index in mol1 or mol2 of the current atom in uniform atom
order.
            (None, None) if a uniform atom order is not available.
"""
pass
@abc.abstractmethod
def get_molecule_hash(self, mol):
"""
Defines a hash for molecules. This allows molecules to be grouped
efficiently for comparison.
Args:
mol: The molecule. OpenBabel OBMol or pymatgen Molecule object
Returns:
A hashable object. Examples can be string formulas, etc.
"""
pass
@classmethod
def from_dict(cls, d):
"""
Args:
d (): Dict
Returns:
AbstractMolAtomMapper
"""
for trans_modules in ["molecule_matcher"]:
import sys
if sys.version_info > (3, 0):
level = 0 # Python 3.x
else:
level = -1 # Python 2.x
mod = __import__(
"pymatgen.analysis." + trans_modules,
globals(),
locals(),
[d["@class"]],
level,
)
if hasattr(mod, d["@class"]):
class_proxy = getattr(mod, d["@class"])
from_dict_proxy = getattr(class_proxy, "from_dict")
return from_dict_proxy(d)
raise ValueError("Invalid Comparator dict")
class IsomorphismMolAtomMapper(AbstractMolAtomMapper):
"""
Pair atoms by isomorphism permutations in the OpenBabel::OBAlign class
"""
def uniform_labels(self, mol1, mol2):
"""
Pair the geometrically equivalent atoms of the molecules.
Calculate RMSD on all possible isomorphism mappings and return mapping
with the least RMSD
Args:
mol1: First molecule. OpenBabel OBMol or pymatgen Molecule object.
mol2: Second molecule. OpenBabel OBMol or pymatgen Molecule object.
Returns:
(list1, list2) if uniform atom order is found. list1 and list2
            are for mol1 and mol2, respectively. Their lengths equal
            the number of atoms. They represent the uniform atom order
of the two molecules. The value of each element is the original
atom index in mol1 or mol2 of the current atom in uniform atom
order.
            (None, None) if a uniform atom order is not available.
"""
obmol1 = BabelMolAdaptor(mol1).openbabel_mol
obmol2 = BabelMolAdaptor(mol2).openbabel_mol
h1 = self.get_molecule_hash(obmol1)
h2 = self.get_molecule_hash(obmol2)
if h1 != h2:
return None, None
query = ob.CompileMoleculeQuery(obmol1)
isomapper = ob.OBIsomorphismMapper.GetInstance(query)
isomorph = ob.vvpairUIntUInt()
isomapper.MapAll(obmol2, isomorph)
sorted_isomorph = [sorted(x, key=lambda morp: morp[0]) for x in isomorph]
label2_list = tuple([tuple([p[1] + 1 for p in x]) for x in sorted_isomorph])
vmol1 = obmol1
aligner = ob.OBAlign(True, False)
aligner.SetRefMol(vmol1)
least_rmsd = float("Inf")
best_label2 = None
label1 = list(range(1, obmol1.NumAtoms() + 1))
# noinspection PyProtectedMember
elements1 = InchiMolAtomMapper._get_elements(vmol1, label1)
for label2 in label2_list:
# noinspection PyProtectedMember
elements2 = InchiMolAtomMapper._get_elements(obmol2, label2)
if elements1 != elements2:
continue
vmol2 = ob.OBMol()
for i in label2:
vmol2.AddAtom(obmol2.GetAtom(i))
aligner.SetTargetMol(vmol2)
aligner.Align()
rmsd = aligner.GetRMSD()
if rmsd < least_rmsd:
least_rmsd = rmsd
best_label2 = copy.copy(label2)
return label1, best_label2
def get_molecule_hash(self, mol):
"""
Return inchi as molecular hash
"""
obconv = ob.OBConversion()
obconv.SetOutFormat(str("inchi"))
obconv.AddOption(str("X"), ob.OBConversion.OUTOPTIONS, str("DoNotAddH"))
inchi_text = obconv.WriteString(mol)
match = re.search(r"InChI=(?P<inchi>.+)\n", inchi_text)
return match.group("inchi")
def as_dict(self):
"""
Returns:
Jsonable dict.
"""
return {
"version": __version__,
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
}
@classmethod
def from_dict(cls, d):
"""
Args:
d (dict): Dict representation
Returns:
IsomorphismMolAtomMapper
"""
return IsomorphismMolAtomMapper()
class InchiMolAtomMapper(AbstractMolAtomMapper):
"""
Pair atoms by inchi labels.
"""
def __init__(self, angle_tolerance=10.0):
"""
Args:
angle_tolerance (float): Angle threshold to assume linear molecule. In degrees.
"""
self._angle_tolerance = angle_tolerance
self._assistant_mapper = IsomorphismMolAtomMapper()
def as_dict(self):
"""
Returns:
MSONAble dict.
"""
return {
"version": __version__,
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"angle_tolerance": self._angle_tolerance,
}
@classmethod
def from_dict(cls, d):
"""
Args:
d (dict): Dict Representation
Returns:
InchiMolAtomMapper
"""
return InchiMolAtomMapper(angle_tolerance=d["angle_tolerance"])
@staticmethod
def _inchi_labels(mol):
"""
Get the inchi canonical labels of the heavy atoms in the molecule
Args:
mol: The molecule. OpenBabel OBMol object
Returns:
The label mappings. List of tuple of canonical label,
original label
List of equivalent atoms.
"""
obconv = ob.OBConversion()
obconv.SetOutFormat(str("inchi"))
obconv.AddOption(str("a"), ob.OBConversion.OUTOPTIONS)
obconv.AddOption(str("X"), ob.OBConversion.OUTOPTIONS, str("DoNotAddH"))
inchi_text = obconv.WriteString(mol)
match = re.search(
r"InChI=(?P<inchi>.+)\nAuxInfo=.+" r"/N:(?P<labels>[0-9,;]+)/(E:(?P<eq_atoms>[0-9," r";\(\)]*)/)?",
inchi_text,
)
inchi = match.group("inchi")
label_text = match.group("labels")
eq_atom_text = match.group("eq_atoms")
heavy_atom_labels = tuple([int(i) for i in label_text.replace(";", ",").split(",")])
eq_atoms = []
if eq_atom_text is not None:
eq_tokens = re.findall(r"\(((?:[0-9]+,)+[0-9]+)\)", eq_atom_text.replace(";", ","))
eq_atoms = tuple([tuple([int(i) for i in t.split(",")]) for t in eq_tokens])
return heavy_atom_labels, eq_atoms, inchi
@staticmethod
def _group_centroid(mol, ilabels, group_atoms):
"""
        Calculate the centroid of a group of atoms indexed by the inchi labels
Args:
mol: The molecule. OpenBabel OBMol object
            ilabels: inchi label map
Returns:
Centroid. Tuple (x, y, z)
"""
c1x, c1y, c1z = 0.0, 0.0, 0.0
for i in group_atoms:
orig_idx = ilabels[i - 1]
oa1 = mol.GetAtom(orig_idx)
c1x += float(oa1.x())
c1y += float(oa1.y())
c1z += float(oa1.z())
num_atoms = len(group_atoms)
c1x /= num_atoms
c1y /= num_atoms
c1z /= num_atoms
return c1x, c1y, c1z
def _virtual_molecule(self, mol, ilabels, eq_atoms):
"""
        Create a virtual molecule from the unique atoms and the centroids of the
        equivalent atoms
Args:
mol: The molecule. OpenBabel OBMol object
            ilabels: inchi label map
eq_atoms: equivalent atom labels
farthest_group_idx: The equivalent atom group index in which
there is the farthest atom to the centroid
Return:
The virtual molecule
"""
vmol = ob.OBMol()
non_unique_atoms = {a for g in eq_atoms for a in g}
all_atoms = set(range(1, len(ilabels) + 1))
unique_atom_labels = sorted(all_atoms - non_unique_atoms)
# try to align molecules using unique atoms
for i in unique_atom_labels:
orig_idx = ilabels[i - 1]
oa1 = mol.GetAtom(orig_idx)
a1 = vmol.NewAtom()
a1.SetAtomicNum(oa1.GetAtomicNum())
a1.SetVector(oa1.GetVector())
# try to align using centroids of the equivalent atoms
if vmol.NumAtoms() < 3:
for symm in eq_atoms:
c1x, c1y, c1z = self._group_centroid(mol, ilabels, symm)
min_distance = float("inf")
for i in range(1, vmol.NumAtoms() + 1):
va = vmol.GetAtom(i)
distance = math.sqrt((c1x - va.x()) ** 2 + (c1y - va.y()) ** 2 + (c1z - va.z()) ** 2)
if distance < min_distance:
min_distance = distance
if min_distance > 0.2:
a1 = vmol.NewAtom()
a1.SetAtomicNum(9)
a1.SetVector(c1x, c1y, c1z)
return vmol
@staticmethod
def _align_heavy_atoms(mol1, mol2, vmol1, vmol2, ilabel1, ilabel2, eq_atoms):
"""
Align the label of topologically identical atoms of second molecule
towards first molecule
Args:
mol1: First molecule. OpenBabel OBMol object
mol2: Second molecule. OpenBabel OBMol object
vmol1: First virtual molecule constructed by centroids. OpenBabel
OBMol object
            vmol2: Second virtual molecule constructed by centroids. OpenBabel
OBMol object
ilabel1: inchi label map of the first molecule
ilabel2: inchi label map of the second molecule
            eq_atoms: equivalent atom labels
Return:
corrected inchi labels of heavy atoms of the second molecule
"""
nvirtual = vmol1.NumAtoms()
nheavy = len(ilabel1)
for i in ilabel2: # add all heavy atoms
a1 = vmol1.NewAtom()
a1.SetAtomicNum(1)
a1.SetVector(0.0, 0.0, 0.0) # useless, just to pair with vmol2
oa2 = mol2.GetAtom(i)
a2 = vmol2.NewAtom()
a2.SetAtomicNum(1)
# align using the virtual atoms, these atoms are not
# used to align, but match by positions
a2.SetVector(oa2.GetVector())
aligner = ob.OBAlign(False, False)
aligner.SetRefMol(vmol1)
aligner.SetTargetMol(vmol2)
aligner.Align()
aligner.UpdateCoords(vmol2)
canon_mol1 = ob.OBMol()
for i in ilabel1:
oa1 = mol1.GetAtom(i)
a1 = canon_mol1.NewAtom()
a1.SetAtomicNum(oa1.GetAtomicNum())
a1.SetVector(oa1.GetVector())
aligned_mol2 = ob.OBMol()
for i in range(nvirtual + 1, nvirtual + nheavy + 1):
oa2 = vmol2.GetAtom(i)
a2 = aligned_mol2.NewAtom()
a2.SetAtomicNum(oa2.GetAtomicNum())
a2.SetVector(oa2.GetVector())
canon_label2 = list(range(1, nheavy + 1))
for symm in eq_atoms:
for i in symm:
canon_label2[i - 1] = -1
for symm in eq_atoms:
candidates1 = list(symm)
candidates2 = list(symm)
for c2 in candidates2:
distance = 99999.0
canon_idx = candidates1[0]
a2 = aligned_mol2.GetAtom(c2)
for c1 in candidates1:
a1 = canon_mol1.GetAtom(c1)
d = a1.GetDistance(a2)
if d < distance:
distance = d
canon_idx = c1
canon_label2[c2 - 1] = canon_idx
candidates1.remove(canon_idx)
canon_inchi_orig_map2 = list(zip(canon_label2, list(range(1, nheavy + 1)), ilabel2))
canon_inchi_orig_map2.sort(key=lambda m: m[0])
heavy_atom_indices2 = tuple([x[2] for x in canon_inchi_orig_map2])
return heavy_atom_indices2
@staticmethod
def _align_hydrogen_atoms(mol1, mol2, heavy_indices1, heavy_indices2):
"""
Align the label of topologically identical atoms of second molecule
towards first molecule
Args:
mol1: First molecule. OpenBabel OBMol object
mol2: Second molecule. OpenBabel OBMol object
heavy_indices1: inchi label map of the first molecule
heavy_indices2: label map of the second molecule
Return:
corrected label map of all atoms of the second molecule
"""
num_atoms = mol2.NumAtoms()
all_atom = set(range(1, num_atoms + 1))
hydrogen_atoms1 = all_atom - set(heavy_indices1)
hydrogen_atoms2 = all_atom - set(heavy_indices2)
label1 = heavy_indices1 + tuple(hydrogen_atoms1)
label2 = heavy_indices2 + tuple(hydrogen_atoms2)
cmol1 = ob.OBMol()
for i in label1:
oa1 = mol1.GetAtom(i)
a1 = cmol1.NewAtom()
a1.SetAtomicNum(oa1.GetAtomicNum())
a1.SetVector(oa1.GetVector())
cmol2 = ob.OBMol()
for i in label2:
oa2 = mol2.GetAtom(i)
a2 = cmol2.NewAtom()
a2.SetAtomicNum(oa2.GetAtomicNum())
a2.SetVector(oa2.GetVector())
aligner = ob.OBAlign(False, False)
aligner.SetRefMol(cmol1)
aligner.SetTargetMol(cmol2)
aligner.Align()
aligner.UpdateCoords(cmol2)
hydrogen_label2 = []
hydrogen_label1 = list(range(len(heavy_indices1) + 1, num_atoms + 1))
for h2 in range(len(heavy_indices2) + 1, num_atoms + 1):
distance = 99999.0
idx = hydrogen_label1[0]
a2 = cmol2.GetAtom(h2)
for h1 in hydrogen_label1:
a1 = cmol1.GetAtom(h1)
d = a1.GetDistance(a2)
if d < distance:
distance = d
idx = h1
hydrogen_label2.append(idx)
hydrogen_label1.remove(idx)
hydrogen_orig_idx2 = label2[len(heavy_indices2) :]
hydrogen_canon_orig_map2 = list(zip(hydrogen_label2, hydrogen_orig_idx2))
hydrogen_canon_orig_map2.sort(key=lambda m: m[0])
hydrogen_canon_indices2 = [x[1] for x in hydrogen_canon_orig_map2]
canon_label1 = label1
canon_label2 = heavy_indices2 + tuple(hydrogen_canon_indices2)
return canon_label1, canon_label2
@staticmethod
def _get_elements(mol, label):
"""
        Get the elements of the atoms in the specified order
Args:
mol: The molecule. OpenBabel OBMol object.
label: The atom indices. List of integers.
Returns:
Elements. List of integers.
"""
elements = [int(mol.GetAtom(i).GetAtomicNum()) for i in label]
return elements
def _is_molecule_linear(self, mol):
"""
Is the molecule a linear one
Args:
mol: The molecule. OpenBabel OBMol object.
Returns:
Boolean value.
"""
if mol.NumAtoms() < 3:
return True
a1 = mol.GetAtom(1)
a2 = mol.GetAtom(2)
for i in range(3, mol.NumAtoms() + 1):
angle = float(mol.GetAtom(i).GetAngle(a2, a1))
if angle < 0.0:
angle = -angle
if angle > 90.0:
angle = 180.0 - angle
if angle > self._angle_tolerance:
return False
return True
def uniform_labels(self, mol1, mol2):
"""
Args:
mol1 (Molecule): Molecule 1
mol2 (Molecule): Molecule 2
Returns:
Labels
"""
obmol1 = BabelMolAdaptor(mol1).openbabel_mol
obmol2 = BabelMolAdaptor(mol2).openbabel_mol
ilabel1, iequal_atom1, inchi1 = self._inchi_labels(obmol1)
ilabel2, iequal_atom2, inchi2 = self._inchi_labels(obmol2)
if inchi1 != inchi2:
            return None, None  # Topologically different
if iequal_atom1 != iequal_atom2:
raise Exception("Design Error! Equavilent atoms are inconsistent")
vmol1 = self._virtual_molecule(obmol1, ilabel1, iequal_atom1)
vmol2 = self._virtual_molecule(obmol2, ilabel2, iequal_atom2)
if vmol1.NumAtoms() != vmol2.NumAtoms():
return None, None
if vmol1.NumAtoms() < 3 or self._is_molecule_linear(vmol1) or self._is_molecule_linear(vmol2):
# using isomorphism for difficult (actually simple) molecules
clabel1, clabel2 = self._assistant_mapper.uniform_labels(mol1, mol2)
else:
heavy_atom_indices2 = self._align_heavy_atoms(obmol1, obmol2, vmol1, vmol2, ilabel1, ilabel2, iequal_atom1)
clabel1, clabel2 = self._align_hydrogen_atoms(obmol1, obmol2, ilabel1, heavy_atom_indices2)
if clabel1 and clabel2:
elements1 = self._get_elements(obmol1, clabel1)
elements2 = self._get_elements(obmol2, clabel2)
if elements1 != elements2:
return None, None
return clabel1, clabel2
def get_molecule_hash(self, mol):
"""
Return inchi as molecular hash
"""
obmol = BabelMolAdaptor(mol).openbabel_mol
inchi = self._inchi_labels(obmol)[2]
return inchi
class MoleculeMatcher(MSONable):
"""
Class to match molecules and identify whether molecules are the same.
"""
@requires(
ob,
"BabelMolAdaptor requires openbabel to be installed with "
"Python bindings. Please get it at http://openbabel.org "
"(version >=3.0.0).",
)
def __init__(self, tolerance=0.01, mapper=InchiMolAtomMapper()):
"""
Args:
tolerance (float): RMSD difference threshold whether two molecules are
different
mapper (AbstractMolAtomMapper): MolAtomMapper object that is able to map the atoms of two
molecule to uniform order
"""
self._tolerance = tolerance
self._mapper = mapper
def fit(self, mol1, mol2):
"""
Fit two molecules.
Args:
mol1: First molecule. OpenBabel OBMol or pymatgen Molecule object
mol2: Second molecule. OpenBabel OBMol or pymatgen Molecule object
Returns:
A boolean value indicates whether two molecules are the same.
"""
return self.get_rmsd(mol1, mol2) < self._tolerance
def get_rmsd(self, mol1, mol2):
"""
Get RMSD between two molecule with arbitrary atom order.
Returns:
RMSD if topology of the two molecules are the same
Infinite if the topology is different
"""
label1, label2 = self._mapper.uniform_labels(mol1, mol2)
if label1 is None or label2 is None:
return float("Inf")
return self._calc_rms(mol1, mol2, label1, label2)
@staticmethod
def _calc_rms(mol1, mol2, clabel1, clabel2):
"""
Calculate the RMSD.
Args:
mol1: The first molecule. OpenBabel OBMol or pymatgen Molecule
object
mol2: The second molecule. OpenBabel OBMol or pymatgen Molecule
object
clabel1: The atom indices that can reorder the first molecule to
uniform atom order
clabel1: The atom indices that can reorder the second molecule to
uniform atom order
Returns:
The RMSD.
"""
obmol1 = BabelMolAdaptor(mol1).openbabel_mol
obmol2 = BabelMolAdaptor(mol2).openbabel_mol
cmol1 = ob.OBMol()
for i in clabel1:
oa1 = obmol1.GetAtom(i)
a1 = cmol1.NewAtom()
a1.SetAtomicNum(oa1.GetAtomicNum())
a1.SetVector(oa1.GetVector())
cmol2 = ob.OBMol()
for i in clabel2:
oa2 = obmol2.GetAtom(i)
a2 = cmol2.NewAtom()
a2.SetAtomicNum(oa2.GetAtomicNum())
a2.SetVector(oa2.GetVector())
aligner = ob.OBAlign(True, False)
aligner.SetRefMol(cmol1)
aligner.SetTargetMol(cmol2)
aligner.Align()
return aligner.GetRMSD()
def group_molecules(self, mol_list):
"""
Group molecules by structural equality.
Args:
mol_list: List of OpenBabel OBMol or pymatgen objects
Returns:
A list of lists of matched molecules
Assumption: if s1=s2 and s2=s3, then s1=s3
This may not be true for small tolerances.
"""
mol_hash = [(i, self._mapper.get_molecule_hash(m)) for i, m in enumerate(mol_list)]
mol_hash.sort(key=lambda x: x[1])
# Use molecular hash to pre-group molecules.
raw_groups = tuple([tuple([m[0] for m in g]) for k, g in itertools.groupby(mol_hash, key=lambda x: x[1])])
group_indices = []
for rg in raw_groups:
mol_eq_test = [
(p[0], p[1], self.fit(mol_list[p[0]], mol_list[p[1]])) for p in itertools.combinations(sorted(rg), 2)
]
mol_eq = {(p[0], p[1]) for p in mol_eq_test if p[2]}
not_alone_mols = set(itertools.chain.from_iterable(mol_eq))
alone_mols = set(rg) - not_alone_mols
group_indices.extend([[m] for m in alone_mols])
while len(not_alone_mols) > 0:
current_group = {not_alone_mols.pop()}
while len(not_alone_mols) > 0:
candidate_pairs = {tuple(sorted(p)) for p in itertools.product(current_group, not_alone_mols)}
mutual_pairs = candidate_pairs & mol_eq
if len(mutual_pairs) == 0:
break
mutual_mols = set(itertools.chain.from_iterable(mutual_pairs))
current_group |= mutual_mols
not_alone_mols -= mutual_mols
group_indices.append(sorted(current_group))
group_indices.sort(key=lambda x: (len(x), -x[0]), reverse=True)
all_groups = [[mol_list[i] for i in g] for g in group_indices]
return all_groups
def as_dict(self):
"""
Returns:
MSONAble dict.
"""
return {
"version": __version__,
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"tolerance": self._tolerance,
"mapper": self._mapper.as_dict(),
}
@classmethod
def from_dict(cls, d):
"""
Args:
d (dict): Dict representation
Returns:
MoleculeMatcher
"""
return MoleculeMatcher(
tolerance=d["tolerance"],
mapper=AbstractMolAtomMapper.from_dict(d["mapper"]),
)
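# Example (hedged sketch): deciding whether two pymatgen Molecules are the same
# species regardless of atom order (requires openbabel).
#
#     mm = MoleculeMatcher(tolerance=0.01, mapper=InchiMolAtomMapper())
#     same = mm.fit(mol1, mol2)        # True if RMSD < tolerance
#     rmsd = mm.get_rmsd(mol1, mol2)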
class KabschMatcher(MSONable):
"""Molecule matcher using Kabsch algorithm
    The Kabsch algorithm is capable of aligning two molecules by finding the parameters
    (translation, rotation) which minimize the root-mean-square deviation (RMSD) of
    two molecules which are topologically (atom types, geometry) similar to each other.
Notes:
When aligning molecules, the atoms of the two molecules **must** be in the same
order for the results to be sensible.
"""
def __init__(self, target: Molecule):
"""Constructor of the matcher object.
Args:
target: a `Molecule` object used as a target during the alignment
"""
self.target = target
def match(self, p: Molecule):
"""Using the Kabsch algorithm the alignment of two molecules (P, Q)
happens in three steps:
- translate the P and Q into their centroid
- compute of the optimal rotation matrix (U) using Kabsch algorithm
- compute the translation (V) and rmsd
The function returns the rotation matrix (U), translation vector (V),
and RMSD between Q and P', where P' is:
P' = P * U + V
Args:
            p: a `Molecule` object that will be matched with the target one.
Returns:
U: Rotation matrix (D,D)
V: Translation vector (D)
RMSD : Root mean squared deviation between P and Q
"""
if self.target.atomic_numbers != p.atomic_numbers:
raise ValueError("The order of the species aren't matching! " "Please try using `PermInvMatcher`.")
p_coord, q_coord = p.cart_coords, self.target.cart_coords
# Both sets of coordinates must be translated first, so that their
# centroid coincides with the origin of the coordinate system.
p_trans, q_trans = p_coord.mean(axis=0), q_coord.mean(axis=0)
p_centroid, q_centroid = p_coord - p_trans, q_coord - q_trans
# The optimal rotation matrix U using Kabsch algorithm
U = self.kabsch(p_centroid, q_centroid)
p_prime_centroid = np.dot(p_centroid, U)
rmsd = np.sqrt(np.mean(np.square(p_prime_centroid - q_centroid)))
V = q_trans - np.dot(p_trans, U)
return U, V, rmsd
def fit(self, p: Molecule):
"""Rotate and transform `p` molecule according to the best match.
Args:
            p: a `Molecule` object that will be matched with the target one.
Returns:
p_prime: Rotated and translated of the `p` `Molecule` object
rmsd: Root-mean-square-deviation between `p_prime` and the `target`
"""
U, V, rmsd = self.match(p)
# Rotate and translate matrix `p` onto the target molecule.
# P' = P * U + V
p_prime = p.copy()
for site in p_prime:
site.coords = np.dot(site.coords, U) + V
return p_prime, rmsd
@staticmethod
def kabsch(P: np.ndarray, Q: np.ndarray):
"""The Kabsch algorithm is a method for calculating the optimal rotation matrix
that minimizes the root mean squared deviation (RMSD) between two paired sets of points
        P and Q, centered around their centroid.
For more info see:
- http://en.wikipedia.org/wiki/Kabsch_algorithm and
- https://cnx.org/contents/HV-RsdwL@23/Molecular-Distance-Measures
Args:
P: Nx3 matrix, where N is the number of points.
Q: Nx3 matrix, where N is the number of points.
Returns:
U: 3x3 rotation matrix
"""
# Computation of the cross-covariance matrix
C = np.dot(P.T, Q)
# Computation of the optimal rotation matrix
# using singular value decomposition (SVD).
V, S, WT = np.linalg.svd(C)
        # Getting the sign of det(V*WT) to decide whether we need to correct our
        # rotation matrix to ensure a right-handed coordinate system
        d = np.linalg.det(np.dot(V, WT))
        # And finally calculating the optimal rotation matrix U
        U = np.dot(np.dot(V, np.diag([1, 1, d])), WT)
return U
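# Example (hedged sketch): aligning a molecule onto a target with KabschMatcher.
# The atoms of `candidate_mol` must already be in the same order as in `target_mol`.
#
#     matcher = KabschMatcher(target_mol)
#     p_prime, rmsd = matcher.fit(candidate_mol)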
class BruteForceOrderMatcher(KabschMatcher):
"""Finding the best match between molecules by selecting molecule order
with the smallest RMSD from all the possible order combinations.
Notes:
        When aligning molecules, the two molecules **must** have the same number
        of atoms of each species.
"""
def match(self, p: Molecule, ignore_warning=False):
"""Similar as `KabschMatcher.match` but this method also finds the order of
atoms which belongs to the best match.
        A `ValueError` will be raised when the total number of possible combinations
        becomes infeasible (more than a million combinations).
Args:
            p: a `Molecule` object that will be matched with the target one.
ignore_warning: ignoring error when the number of combination is too large
Returns:
inds: The indices of atoms
U: 3x3 rotation matrix
V: Translation vector
rmsd: Root mean squared deviation between P and Q
"""
q = self.target
if sorted(p.atomic_numbers) != sorted(q.atomic_numbers):
raise ValueError("The number of the same species aren't matching!")
_, count = np.unique(p.atomic_numbers, return_counts=True)
total_permutations = 1
for c in count:
total_permutations *= np.math.factorial(c) # type: ignore
if not ignore_warning and total_permutations > 1_000_000:
raise ValueError(
"The number of all possible permutations "
"({}) is not feasible to run this method!".format(total_permutations)
)
p_coord, q_coord = p.cart_coords, q.cart_coords
p_atoms, q_atoms = np.array(p.atomic_numbers), np.array(q.atomic_numbers)
# Both sets of coordinates must be translated first, so that
# their centroid coincides with the origin of the coordinate system.
p_trans, q_trans = p_coord.mean(axis=0), q_coord.mean(axis=0)
p_centroid, q_centroid = p_coord - p_trans, q_coord - q_trans
# Sort the order of the target molecule by the elements
q_inds = np.argsort(q_atoms)
q_centroid = q_centroid[q_inds]
# Initializing return values
rmsd = np.inf
# Generate all permutation grouped/sorted by the elements
for p_inds_test in self.permutations(p_atoms):
p_centroid_test = p_centroid[p_inds_test]
U_test = self.kabsch(p_centroid_test, q_centroid)
p_centroid_prime_test = np.dot(p_centroid_test, U_test)
rmsd_test = np.sqrt(np.mean(np.square(p_centroid_prime_test - q_centroid)))
if rmsd_test < rmsd:
p_inds, U, rmsd = p_inds_test, U_test, rmsd_test
        # Rotate and translate matrix P onto matrix Q using Kabsch algorithm.
# P' = P * U + V
V = q_trans - np.dot(p_trans, U)
# Using the original order of the indices
inds = p_inds[np.argsort(q_inds)]
return inds, U, V, rmsd
def fit(self, p: Molecule, ignore_warning=False):
"""Order, rotate and transform `p` molecule according to the best match.
        A `ValueError` will be raised when the total number of possible combinations
        becomes infeasible (more than a million combinations).
Args:
            p: a `Molecule` object that will be matched with the target one.
ignore_warning: ignoring error when the number of combination is too large
Returns:
p_prime: Rotated and translated of the `p` `Molecule` object
rmsd: Root-mean-square-deviation between `p_prime` and the `target`
"""
inds, U, V, rmsd = self.match(p, ignore_warning=ignore_warning)
p_prime = Molecule.from_sites([p[i] for i in inds])
for site in p_prime:
site.coords = np.dot(site.coords, U) + V
return p_prime, rmsd
@staticmethod
def permutations(atoms):
"""Generates all the possible permutations of atom order. To achieve better
performance all tha cases where the atoms are different has been ignored.
"""
element_iterators = [itertools.permutations(np.where(atoms == element)[0]) for element in np.unique(atoms)]
for inds in itertools.product(*element_iterators):
yield np.array(list(itertools.chain(*inds)))
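# Example (hedged sketch): brute-force matching when the atom order is unknown.
# `target_mol` and `candidate_mol` are assumed to be pymatgen Molecule objects
# with the same composition.
#
#     matcher = BruteForceOrderMatcher(target_mol)
#     p_prime, rmsd = matcher.fit(candidate_mol)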
class HungarianOrderMatcher(KabschMatcher):
"""This method pre-aligns the molecules based on their principal inertia
axis and then re-orders the input atom list using the Hungarian method.
Notes:
This method cannot guarantee the best match but is very fast.
        When aligning molecules, the two molecules **must** have the same number
        of atoms of each species.
"""
def match(self, p: Molecule):
"""Similar as `KabschMatcher.match` but this method also finds the order of
atoms which belongs to the best match.
Args:
            p: a `Molecule` object that will be matched with the target one.
Returns:
inds: The indices of atoms
U: 3x3 rotation matrix
V: Translation vector
rmsd: Root mean squared deviation between P and Q
"""
if sorted(p.atomic_numbers) != sorted(self.target.atomic_numbers):
raise ValueError("The number of the same species aren't matching!")
p_coord, q_coord = p.cart_coords, self.target.cart_coords
p_atoms, q_atoms = (
np.array(p.atomic_numbers),
np.array(self.target.atomic_numbers),
)
p_weights = np.array([site.species.weight for site in p])
q_weights = np.array([site.species.weight for site in self.target])
# Both sets of coordinates must be translated first, so that
        # their center of mass coincides with the origin of the coordinate system.
p_trans, q_trans = p.center_of_mass, self.target.center_of_mass
p_centroid, q_centroid = p_coord - p_trans, q_coord - q_trans
# Initializing return values
rmsd = np.inf
# Generate all permutation grouped/sorted by the elements
for p_inds_test in self.permutations(p_atoms, p_centroid, p_weights, q_atoms, q_centroid, q_weights):
p_centroid_test = p_centroid[p_inds_test]
U_test = self.kabsch(p_centroid_test, q_centroid)
p_centroid_prime_test = np.dot(p_centroid_test, U_test)
rmsd_test = np.sqrt(np.mean(np.square(p_centroid_prime_test - q_centroid)))
if rmsd_test < rmsd:
inds, U, rmsd = p_inds_test, U_test, rmsd_test
        # Rotate and translate matrix P onto matrix Q using Kabsch algorithm.
# P' = P * U + V
V = q_trans - np.dot(p_trans, U)
return inds, U, V, rmsd
def fit(self, p: Molecule):
"""Order, rotate and transform `p` molecule according to the best match.
Args:
            p: a `Molecule` object that will be matched with the target one.
Returns:
p_prime: Rotated and translated of the `p` `Molecule` object
rmsd: Root-mean-square-deviation between `p_prime` and the `target`
"""
inds, U, V, rmsd = self.match(p)
        # Translate and rotate `mol1` onto `mol2` using Kabsch algorithm.
p_prime = Molecule.from_sites([p[i] for i in inds])
for site in p_prime:
site.coords = np.dot(site.coords, U) + V
return p_prime, rmsd
@staticmethod
def permutations(p_atoms, p_centroid, p_weights, q_atoms, q_centroid, q_weights):
"""Generates two possible permutations of atom order. This method uses the principle component
of the inertia tensor to prealign the molecules and hungarian method to determine the order.
There are always two possible permutation depending on the way to pre-aligning the molecules.
Args:
p_atoms: atom numbers
p_centroid: array of atom positions
p_weights: array of atom weights
q_atoms: atom numbers
q_centroid: array of atom positions
q_weights: array of atom weights
Yield:
perm_inds: array of atoms' order
"""
# get the principal axis of P and Q
p_axis = HungarianOrderMatcher.get_principal_axis(p_centroid, p_weights)
q_axis = HungarianOrderMatcher.get_principal_axis(q_centroid, q_weights)
        # rotate Q onto P considering that the axes are parallel and antiparallel
U = HungarianOrderMatcher.rotation_matrix_vectors(q_axis, p_axis)
p_centroid_test = np.dot(p_centroid, U)
# generate full view from q shape to fill in atom view on the fly
perm_inds = np.zeros(len(p_atoms), dtype=int)
# Find unique atoms
species = np.unique(p_atoms)
for specie in species:
p_atom_inds = np.where(p_atoms == specie)[0]
q_atom_inds = np.where(q_atoms == specie)[0]
A = q_centroid[q_atom_inds]
B = p_centroid_test[p_atom_inds]
# Perform Hungarian analysis on distance matrix between atoms of 1st
# structure and trial structure
distances = cdist(A, B, "euclidean")
a_inds, b_inds = linear_sum_assignment(distances)
perm_inds[q_atom_inds] = p_atom_inds[b_inds]
yield perm_inds
        # rotate Q onto P considering that the axes are parallel and antiparallel
U = HungarianOrderMatcher.rotation_matrix_vectors(q_axis, -p_axis)
p_centroid_test = np.dot(p_centroid, U)
# generate full view from q shape to fill in atom view on the fly
perm_inds = np.zeros(len(p_atoms), dtype=int)
# Find unique atoms
species = np.unique(p_atoms)
for specie in species:
p_atom_inds = np.where(p_atoms == specie)[0]
            q_atom_inds = np.where(q_atoms == specie)[0]
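            # NOTE: the original snippet is truncated here; the remainder below is a hedged
            # sketch mirroring the first (parallel-axis) branch of this method above.
            A = q_centroid[q_atom_inds]
            B = p_centroid_test[p_atom_inds]
            # Perform Hungarian analysis on distance matrix between atoms of 1st
            # structure and trial structure
            distances = cdist(A, B, "euclidean")
            a_inds, b_inds = linear_sum_assignment(distances)
            perm_inds[q_atom_inds] = p_atom_inds[b_inds]
        yield perm_inds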
import ast
import json
import numpy as np
from utils import *
from linalg import *
import networkx as nx
from itertools import chain
from collections import Counter
from typing import Any, Dict, Iterable, List, NewType, Tuple, TypeVar, Set
ndarray = NewType('numpy ndarray', np.ndarray)
CountDict = TypeVar('result of Counter', Dict[str, int], Dict[str, float])
LabelDict = NewType('dictionary of {id: set of all its GO labels}', Dict[str, Set[str]])
def load_matrices(job_id: str, run_id: str)-> Tuple[ndarray, ndarray]:
if not file_exists(f"{job_id}/dsd_pdist_matrix{run_id}.npy", 'PREDICTION', ext='.npy'): exit()
if not file_exists(f"{job_id}/source_dsd_pdist_matrix{run_id}.npy", 'PREDICTION', ext='.npy'): exit()
source_dsd_matrix = np.load(f"{job_id}/source_dsd_pdist_matrix{run_id}.npy", allow_pickle=True)
target_dsd_matrix = np.load(f"{job_id}/dsd_pdist_matrix{run_id}.npy", allow_pickle=True)
return source_dsd_matrix, target_dsd_matrix
def load_labels(job_id: str, network_name: str, strawman_number: str, run_id: str)-> Dict[str, str]:
if not file_exists(f"{job_id}/strawman_{network_name}_labels{run_id}.json", 'PREDICTION', ext='.json'): exit()
labels = None
with open(f"{job_id}/strawman_{network_name}_labels{run_id}.json", "r") as lptr:
labels = json.load(lptr)
labels = {v:int(k) for k,v in labels.items()}
return labels
def load_and_index_hits(source_ref_labels: Dict[str, str], target_ref_labels: Dict[str, str], job_id: str, strawman_number: str)-> Dict[int, List[int]]:
if not file_exists(f'{job_id}/strawman{strawman_number}_hits.txt', 'PREDICTION'): exit()
hits = dict()
with open(f'{job_id}/strawman{strawman_number}_hits.txt', 'r') as rptr:
for line in rptr.readlines():
a,b = line.split('\t')
hits[a] = ast.literal_eval(b)
hit_idxs = dict()
for target_query, hitlist in hits.items():
hit_idxs[target_ref_labels.get(target_query)] = [source_ref_labels.get(source_match) for source_match in hitlist]
return {k:v for k,v in hit_idxs.items() if k and v}
def map_ref_to_GO(go_file: str, ref_file: str, go_aspect: str)-> LabelDict:
if not file_exists(go_file, 'PREDICTION', ext='.gaf'): exit()
if not file_exists(ref_file, 'PREDICTION', ext='.json'): exit()
refseq_to_uniprot_mapping, uniprot_to_GO_mapping = dict(), dict()
with open(ref_file, 'r') as rptr, open(go_file, 'r') as gptr:
refseq_to_uniprot_mapping = json.load(rptr)
for entry in gptr.readlines()[12:]: # skip the description lines
db, uni_id, _, _, go_id, _, _, _, aspect = entry.strip().split('\t')[:9]
if not "uniprotkb" in db.lower(): continue # GO labels are ** not ** unique ---> {uni: {go_id1, go_id2, ...}, ...}
if 'A' not in go_aspect and aspect.strip().upper() not in go_aspect: continue # filter by aspect
uniprot_to_GO_mapping[uni_id] = uniprot_to_GO_mapping.get(uni_id, set()) | {go_id}
refseq_to_GO_mapping = {r:g for r,g in {ref_id:uniprot_to_GO_mapping.get(uni_id) for ref_id, uni_id in refseq_to_uniprot_mapping.items()}.items() if g}
return refseq_to_GO_mapping
def filter_labels(tgt_GO_labels: Dict[int, str], annotation_counts: CountDict, low: int = 50, high: int = 500)-> LabelDict:
filtered_tgt_labels = dict()
# n_limited = len([ann for ann, c in annotation_counts.items() if low <= c <= high])
# print(f'{n_limited} between {low} and {high}')
for i, go_ids in tgt_GO_labels.items():
for go_id in go_ids:
n_annotations = annotation_counts.get(go_id, -1) # n_annotations from TARGET networks
if not low <= n_annotations <= high: continue
filtered_tgt_labels[i] = filtered_tgt_labels.get(i, set()) | {go_id}
if not filtered_tgt_labels.get(i):
filtered_tgt_labels[i] = set() # fill in missing ones with empties - they just have no vote.
return filtered_tgt_labels # {i: {go_id1, go_id2, ...}, ...}
def compute_accuracy(predictions: Dict[int, List[str]], target_GO_labels: Dict[int, str])-> float:
n_correct = 0
n_predicted = 0
n_empty = 0
for test_idx, predicted_label_list in predictions.items():
real_labels = target_GO_labels.get(test_idx)
if not real_labels: n_empty +=1; continue
n_predicted += 1
if any([True for p in predicted_label_list if p in real_labels]): n_correct += 1
#print(f"{n_empty} empty out of {n_predicted} predictions")
if not n_predicted: return 0
return (n_correct/n_predicted)*100
def wmv(target_counts: CountDict, hit_counts: CountDict, weights: Tuple[float, float])-> CountDict:
tw, hw = weights
combo = {go_label:(count * tw) for go_label, count in target_counts.items()} if target_counts else dict()
if not hit_counts: return combo
for go_label, count in hit_counts.items():
combo[go_label] = (count * hw) + combo.get(go_label, 0)
return combo
def poll_neighborhood(neighbors: ndarray, labels: Dict[int, str], test_idxs: ndarray, indexed_vote_dict: Dict[int, CountDict])-> None:
real_row_idx = test_idxs[len(indexed_vote_dict)]
votes = chain(*np.vectorize(labels.get)(neighbors).tolist())
try:
iterator = iter(votes)
indexed_vote_dict[real_row_idx] = Counter(votes)
except TypeError:
indexed_vote_dict[real_row_idx] = Counter()
def poll_hits(test_idxs: ndarray, hit_idxs: Dict[int,int], source_dsd_matrix: ndarray,
source_GO_labels: Dict[int, str], q: int, strawman_number: str)-> CountDict:
hit_votes = dict()
include_hit_dsd_neighbors = True if '+' in strawman_number else False
for test_idx in test_idxs:
hitlist = hit_idxs.get(test_idx)
if not hitlist: hit_votes[test_idx] = dict(); continue
for source_match_idx in hitlist:
match_votes, match_neighbor_votes = Counter(source_GO_labels.get(source_match_idx)), Counter()
if include_hit_dsd_neighbors:
match_neighbor_idxs = np.argsort(source_dsd_matrix[source_match_idx, :])[:q]
match_neighbor_votes = Counter(chain(*np.vectorize(source_GO_labels.__getitem__)(match_neighbor_idxs)))
hit_votes[test_idx] = match_votes + match_neighbor_votes
return hit_votes
def train_test_split(dim: int, block_size: int, seed: int)-> Tuple[ndarray, ndarray]:
np.random.seed(seed)
if block_size == dim: block_size = 1
test_idxs = np.random.choice(np.arange(dim), size=block_size, replace=False)
train_idxs = np.delete(np.arange(dim), test_idxs)
return train_idxs, test_idxs
def k_fold_cv(source_GO_labels: Dict[int, str], target_GO_labels: Dict[int, str], hit_idxs: ndarray, source_dsd_matrix: ndarray, target_dsd_matrix: ndarray,
k: int, seed: int, p: int, q: int, n_labels: int, weights: List[float], strawman_number: str, verbose: bool)-> List[float]:
m = target_dsd_matrix.shape[0]
if not k: print('Fold size (k) cannot be zero, muchacho'); exit()
if not is_square(source_dsd_matrix) or not is_square(target_dsd_matrix): print('[PREDICTION ERROR] Provided matrices have invalid shapes'); exit()
if not seed: seed = np.random.randint(10000)
n_rounds = abs(k)
accuracy = list()
for i in range(n_rounds):
train_idxs, test_idxs = train_test_split(m, m//n_rounds, seed+i)
if k < 0 : train_idxs, test_idxs = test_idxs, train_idxs # cascade setting, for internal BCB use
if verbose: print(f'\tStarting fold {i+1}/{n_rounds} with {len(train_idxs)} training nodes and {len(test_idxs)} testing nodes...')
if verbose: print(f'\t\tExtracting fold from full matrices...')
target_grid = np.ix_(test_idxs, train_idxs)
target_fold = target_dsd_matrix[target_grid] # (m/k) x (m(k-1)/k)
if verbose: print(f'\t\tLocating neighbor indexes in fold...')
target_grid_idxs = np.argsort(target_fold, axis=1)[:,:p]
if verbose: print(f'\t\tRe-indexing neighbors to match original matrices...')
        # NOTE: the call below was truncated in the original after np.vectorize(...);
        # the axis and array arguments are assumed from the surrounding context.
        target_neighbor_col_idxs = np.apply_along_axis(np.vectorize(train_idxs.__getitem__), 1, target_grid_idxs)
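        # NOTE: the original snippet ends at the line above. The remainder of the fold
        # below is a hedged sketch that wires together the helpers defined earlier
        # (poll_neighborhood, poll_hits, wmv, compute_accuracy); it is an assumption,
        # not the original code.
        if verbose: print(f'\t\tPolling neighborhoods and hits, then combining votes...')
        target_votes = dict()
        np.apply_along_axis(poll_neighborhood, 1, target_neighbor_col_idxs,
                            target_GO_labels, test_idxs, target_votes)
        hit_votes = poll_hits(test_idxs, hit_idxs, source_dsd_matrix, source_GO_labels, q, strawman_number)
        combined = {t: wmv(target_votes.get(t), hit_votes.get(t), weights) for t in test_idxs}
        predictions = {t: [go for go, _ in Counter(votes).most_common(n_labels)] for t, votes in combined.items()}
        accuracy.append(compute_accuracy(predictions, target_GO_labels))
    return accuracy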
"""
FBM_single_functions.py
This script contains functions for activating and testing of Fractional Brownian Motion
single-trajectory networks trained to estimate the Hurst exponent.
"""
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from keras.models import load_model
from utils import fbm_diffusion
from utils import autocorr
import scipy.optimize
import scipy.io
import seaborn as sns
import pickle
"""
Function predict_1D is used to run a version of the network on a single trajectory.
The function assumes the input is a column vector
Input:
x - 1D column vector containing localization data
stepsActual - number of steps to analyze in each trajectory. This number
determines which network will run on the data.
Select from options: [10,25,60,100,200,300,500,700,1000]
reg_model - loaded model of the network (comes as input to avoid
reloading the same model for each trajectory)
Outputs:
pX - Network prediction for the Hurst exponenet of the 1D trajectory.
"""
def predict_1D(x,stepsActual,reg_model):
if len(x)<stepsActual:
return 0
dx = (np.diff(x[:stepsActual],axis=0)[:,0])
dx = autocorr((dx-np.mean(dx))/(np.std(dx)+1e-10))
dx = np.reshape(dx,[1,np.size(dx),1])
pX = reg_model.predict(dx)
return pX
"""
Function net_on_file is used to run a version of the network on a .mat file
containing one or more single particle trajectories.
The function assumes the input comes in the form x,y,z,...,N where N is the
trajectory serial number, starting from one.
Input:
file - string containing the file name, ending with .mat
Outputs:
prediction - A vector with length equal to the number of trajectories, containing
network predictions (average of N-dimensional predictions)
NDpred - A matrix with dimensions [#trajectories,#dimensions] containing
all predictions done by the network (N-dimensions for each trajectory)
"""
def FBM_net_on_file(file,stepsActual):
    # load trained keras model
### change here to load a different network model
net_file = './Models/300-H-estimate.h5'
reg_model = load_model(net_file)
###
# load mat file and extract trajectory data
f = scipy.io.loadmat(file)
for k in f.keys():
if k[0]=='_':
continue
varName = k
data = f[varName]
NAxes = (np.shape(data)[1]-1)
numTraj = len(np.unique(data[:,NAxes]))
# allocate variables to hold temporary data and prediction results
prediction = np.zeros([numTraj,1])
NDpred = np.zeros([numTraj,(np.shape(data)[1]-1)])
# iterate over trajectories and analyze data
for i in np.arange(0,numTraj):
for j in range((np.shape(data)[1]-1)):
x = data[np.argwhere(data[:,NAxes]==i+1),j]
pX = predict_1D(x,stepsActual,reg_model)
NDpred[i,j] = pX
NDpred = NDpred[np.where(NDpred>0)]
NDpred = np.reshape(NDpred,[int(np.size(NDpred)/NAxes),NAxes])
    prediction = np.mean(NDpred, axis=1)
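    # NOTE: the original snippet ends at the line above; returning both the averaged
    # prediction and the per-dimension predictions, as described in the docstring
    # (assumed, not the original code).
    return prediction, NDpred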
"""
Module description:
"""
__version__ = '0.3.1'
__author__ = '<NAME>, <NAME>'
__email__ = '<EMAIL>, <EMAIL>'
import copy
import os
from types import SimpleNamespace
import numpy as np
import pandas as pd
import scipy.sparse as sp
import typing as t
import logging as pylog
from elliot.dataset.abstract_dataset import AbstractDataset
from elliot.splitter.base_splitter import Splitter
from elliot.prefiltering.standard_prefilters import PreFilter
from elliot.negative_sampling.negative_sampling import NegativeSampler
from elliot.utils import logging
from elliot.dataset.modular_loaders.loader_coordinator_mixin import LoaderCoordinator
class DataSetLoader(LoaderCoordinator):
"""
Load train and test dataset
"""
def __init__(self, config, *args, **kwargs):
"""
Constructor of DataSet
:param path_train_data: relative path for train file
:param path_test_data: relative path for test file
"""
self.logger = logging.get_logger(self.__class__.__name__)
self.args = args
self.kwargs = kwargs
self.config = config
self.column_names = ['userId', 'itemId', 'rating', 'timestamp']
if config.config_test:
return
if config.data_config.strategy == "fixed":
path_train_data = config.data_config.train_path
path_val_data = getattr(config.data_config, "validation_path", None)
path_test_data = config.data_config.test_path
self.train_dataframe = pd.read_csv(path_train_data, sep="\t", header=None, names=self.column_names)
self.test_dataframe = pd.read_csv(path_test_data, sep="\t", header=None, names=self.column_names)
# self.train_dataframe, self.side_information = self.coordinate_information(self.train_dataframe, sides=config.data_config.side_information)
# self.train_dataframe = pd.read_csv(path_train_data, sep="\t", header=None, names=self.column_names)
self.train_dataframe = self.check_timestamp(self.train_dataframe)
self.test_dataframe = self.check_timestamp(self.test_dataframe)
self.logger.info(f"{path_train_data} - Loaded")
if config.binarize == True or all(self.train_dataframe["rating"].isna()):
self.test_dataframe["rating"] = 1
self.train_dataframe["rating"] = 1
if path_val_data:
self.validation_dataframe = pd.read_csv(path_val_data, sep="\t", header=None, names=self.column_names)
self.validation_dataframe = self.check_timestamp(self.validation_dataframe)
if config.binarize == True or all(self.train_dataframe["rating"].isna()):
self.validation_dataframe["rating"] = 1
self.tuple_list = [([(self.train_dataframe, self.validation_dataframe)], self.test_dataframe)]
self.tuple_list, self.side_information = self.coordinate_information(self.tuple_list,
sides=config.data_config.side_information,
logger=self.logger)
else:
self.tuple_list = [(self.train_dataframe, self.test_dataframe)]
self.tuple_list, self.side_information = self.coordinate_information(self.tuple_list,
sides=config.data_config.side_information,
logger=self.logger)
elif config.data_config.strategy == "hierarchy":
self.tuple_list = self.read_splitting(config.data_config.root_folder, column_names=self.column_names)
self.tuple_list, self.side_information = self.coordinate_information(self.tuple_list, sides=config.data_config.side_information,
logger=self.logger)
elif config.data_config.strategy == "dataset":
self.logger.info("There will be the splitting")
path_dataset = config.data_config.dataset_path
self.dataframe = pd.read_csv(path_dataset, sep="\t", header=None, names=self.column_names)
self.dataframe, self.side_information = self.coordinate_information(self.dataframe,
sides=config.data_config.side_information,
logger=self.logger)
# self.dataframe = pd.read_csv(path_dataset, sep="\t", header=None, names=self.column_names)
self.dataframe = self.check_timestamp(self.dataframe)
self.logger.info(('{0} - Loaded'.format(path_dataset)))
self.dataframe = PreFilter.filter(self.dataframe, self.config)
if config.binarize == True or all(self.dataframe["rating"].isna()):
self.dataframe["rating"] = 1
splitter = Splitter(self.dataframe, self.config.splitting, self.config.random_seed)
self.tuple_list = splitter.process_splitting()
else:
raise Exception("Strategy option not recognized")
if isinstance(self.tuple_list[0][1], list):
self.logger.warning("You are using a splitting strategy with folds. "
"Paired TTest and Wilcoxon Test are not available!")
self.config.evaluation.paired_ttest = False
self.config.evaluation.wilcoxon_test = False
def check_timestamp(self, d: pd.DataFrame) -> pd.DataFrame:
if all(d["timestamp"].isna()):
d = d.drop(columns=["timestamp"]).reset_index(drop=True)
return d
    def read_splitting(self, folder_path, column_names):
        tuple_list = []
        for test_dir in sorted(os.listdir(folder_path)):
            # each sub-directory of the root folder corresponds to one test fold
            test_path = os.sep.join([folder_path, test_dir])
            if not os.path.isdir(test_path):
                continue
            test_ = pd.read_csv(os.sep.join([test_path, "test.tsv"]), sep="\t", names=self.column_names)
            val_dirs = [os.sep.join([test_path, val_dir]) for val_dir in os.listdir(test_path)
                        if os.path.isdir(os.sep.join([test_path, val_dir]))]
            val_list = []
            for val_dir in val_dirs:
                train_ = pd.read_csv(os.sep.join([val_dir, "train.tsv"]), sep="\t", names=self.column_names)
                val_ = pd.read_csv(os.sep.join([val_dir, "val.tsv"]), sep="\t", names=self.column_names)
                val_list.append((train_, val_))
            if not val_list:
                val_list = pd.read_csv(os.sep.join([test_path, "train.tsv"]), sep="\t", names=self.column_names)
            tuple_list.append((val_list, test_))
        return tuple_list
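    # Directory layout assumed by read_splitting for the "hierarchy" strategy (inferred from
    # the paths it reads; the fold-folder names themselves can be anything):
    #
    #   root_folder/
    #       <test_fold>/
    #           test.tsv
    #           train.tsv            <- used when the fold has no validation sub-folders
    #           <val_fold>/          <- optional, one per validation split
    #               train.tsv
    #               val.tsv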
def generate_dataobjects(self) -> t.List[object]:
data_list = []
for p1, (train_val, test) in enumerate(self.tuple_list):
# testset level
if isinstance(train_val, list):
# validation level
val_list = []
for p2, (train, val) in enumerate(train_val):
self.logger.info(f"Test Fold {p1} - Validation Fold {p2}")
single_dataobject = DataSet(self.config, (train,val,test), self.side_information, self.args, self.kwargs)
val_list.append(single_dataobject)
data_list.append(val_list)
else:
self.logger.info(f"Test Fold {p1}")
single_dataobject = DataSet(self.config, (train_val, test), self.side_information, self.args,
self.kwargs)
data_list.append([single_dataobject])
return data_list
def generate_dataobjects_mock(self) -> t.List[object]:
np.random.seed(self.config.random_seed)
_column_names = ['userId', 'itemId', 'rating']
training_set = np.hstack(
(np.random.randint(0, 5 * 20, size=(5 * 20, 2)), np.random.randint(0, 2, size=(5 * 20, 1))))
test_set = np.hstack(
(np.random.randint(0, 5 * 20, size=(5 * 20, 2)), np.random.randint(0, 2, size=(5 * 20, 1))))
training_set = pd.DataFrame(np.array(training_set), columns=_column_names)
test_set = pd.DataFrame(np.array(test_set), columns=_column_names)
data_list = [[DataSet(self.config, (training_set, test_set), self.args, self.kwargs)]]
return data_list
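# Minimal, hypothetical usage sketch (not part of the module): it only mirrors the config
# attributes that DataSetLoader.__init__ reads for the "fixed" strategy above. In Elliot the
# config object is normally built from the YAML experiment file, so the namespace and the
# file paths below are illustrative assumptions only.
#
#   from types import SimpleNamespace
#
#   config = SimpleNamespace(
#       config_test=False,
#       binarize=False,
#       random_seed=42,
#       align_side_with_train=False,
#       data_config=SimpleNamespace(
#           strategy="fixed",
#           train_path="data/train.tsv",
#           test_path="data/test.tsv",
#           side_information=[],
#       ),
#       evaluation=SimpleNamespace(paired_ttest=False, wilcoxon_test=False),
#   )
#   loader = DataSetLoader(config)
#   folds = loader.generate_dataobjects()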
class DataSet(AbstractDataset):
"""
Load train and test dataset
"""
def __init__(self, config, data_tuple, side_information_data, *args, **kwargs):
"""
        Constructor of DataSet.

        :param config: experiment configuration namespace
        :param data_tuple: (train, test) or (train, validation, test) dataframes
        :param side_information_data: side-information namespace produced by the loader
"""
self.logger = logging.get_logger(self.__class__.__name__, pylog.CRITICAL if config.config_test else
pylog.DEBUG)
self.config = config
self.args = args
self.kwargs = kwargs
if self.config.align_side_with_train == True:
self.side_information = self.align_with_training(train=data_tuple[0], side_information_data=side_information_data)
else:
self.side_information = side_information_data
self.train_dict = self.dataframe_to_dict(data_tuple[0])
self.users = list(self.train_dict.keys())
self.items = list({k for a in self.train_dict.values() for k in a.keys()})
self.num_users = len(self.users)
self.num_items = len(self.items)
self.transactions = sum(len(v) for v in self.train_dict.values())
sparsity = 1 - (self.transactions / (self.num_users * self.num_items))
self.logger.info(f"Statistics\tUsers:\t{self.num_users}\tItems:\t{self.num_items}\tTransactions:\t{self.transactions}\t"
f"Sparsity:\t{sparsity}")
self.private_users = {p: u for p, u in enumerate(self.users)}
self.public_users = {v: k for k, v in self.private_users.items()}
self.private_items = {p: i for p, i in enumerate(self.items)}
self.public_items = {v: k for k, v in self.private_items.items()}
self.i_train_dict = {self.public_users[user]: {self.public_items[i]: v for i, v in items.items()}
for user, items in self.train_dict.items()}
self.sp_i_train = self.build_sparse()
self.sp_i_train_ratings = self.build_sparse_ratings()
if len(data_tuple) == 2:
self.test_dict = self.build_dict(data_tuple[1], self.users)
if hasattr(config, "negative_sampling"):
val_neg_samples, test_neg_samples = NegativeSampler.sample(config, self.public_users, self.public_items,
self.private_users, self.private_items,
self.sp_i_train, None, self.test_dict)
sp_i_test = self.to_bool_sparse(self.test_dict)
test_candidate_items = test_neg_samples + sp_i_test
self.test_mask = np.where((test_candidate_items.toarray() == True), True, False)
else:
self.val_dict = self.build_dict(data_tuple[1], self.users)
self.test_dict = self.build_dict(data_tuple[2], self.users)
if hasattr(config, "negative_sampling"):
val_neg_samples, test_neg_samples = NegativeSampler.sample(config, self.public_users, self.public_items,
self.private_users, self.private_items,
self.sp_i_train, self.val_dict, self.test_dict)
sp_i_val = self.to_bool_sparse(self.val_dict)
sp_i_test = self.to_bool_sparse(self.test_dict)
val_candidate_items = val_neg_samples + sp_i_val
self.val_mask = np.where((val_candidate_items.toarray() == True), True, False)
test_candidate_items = test_neg_samples + sp_i_test
self.test_mask = np.where((test_candidate_items.toarray() == True), True, False)
self.allunrated_mask = np.where((self.sp_i_train.toarray() == 0), True, False)
    def dataframe_to_dict(self, data):
        # Convert the interactions dataframe into a nested dict: {user: {item: rating}}
        users = list(data['userId'].unique())
        ratings = {}
        for u in users:
            sel_ = data[data['userId'] == u]
            ratings[u] = dict(zip(sel_['itemId'], sel_['rating']))
        return ratings
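    # Equivalent pandas-idiomatic variant of the conversion above (a sketch, not the method
    # this class uses); typically faster on large datasets because it avoids one boolean
    # scan of the dataframe per user:
    #
    #   def dataframe_to_dict(self, data):
    #       return {u: dict(zip(g['itemId'], g['rating']))
    #               for u, g in data.groupby('userId', sort=False)}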
def build_dict(self, dataframe, users):
ratings = {}
for u in users:
sel_ = dataframe[dataframe['userId'] == u]
ratings[u] = dict(zip(sel_['itemId'], sel_['rating']))
return ratings
def build_sparse(self):
rows_cols = [(u, i) for u, items in self.i_train_dict.items() for i in items.keys()]
rows = [u for u, _ in rows_cols]
cols = [i for _, i in rows_cols]
        data = sp.csr_matrix((np.ones_like(rows), (rows, cols)), dtype='bool',
                             shape=(len(self.users), len(self.items)))
        return data
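    # The two helpers below are called from __init__ (build_sparse_ratings, to_bool_sparse)
    # but were missing from this excerpt. These are assumed reconstructions based only on how
    # their results are used above; the upstream Elliot implementations may differ.

    def build_sparse_ratings(self):
        # user x item CSR matrix carrying the explicit rating values
        rows, cols, values = [], [], []
        for u, items in self.i_train_dict.items():
            for i, r in items.items():
                rows.append(u)
                cols.append(i)
                values.append(r)
        return sp.csr_matrix((values, (rows, cols)), dtype='float32',
                             shape=(len(self.users), len(self.items)))

    def to_bool_sparse(self, rating_dict):
        # boolean user x item CSR matrix built from a {user: {item: rating}} dict;
        # items not seen in training are skipped (an assumption of this sketch)
        rows, cols = [], []
        for u, items in rating_dict.items():
            for i in items.keys():
                if i in self.public_items:
                    rows.append(self.public_users[u])
                    cols.append(self.public_items[i])
        return sp.csr_matrix((np.ones(len(rows), dtype=bool), (rows, cols)),
                             dtype='bool', shape=(len(self.users), len(self.items)))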
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
import sys, os
from unittest.mock import patch
sys.path.append(os.path.abspath("..")) # current folder is ~/tests
from idaes.core.surrogate.pysmo.polynomial_regression import (
PolynomialRegression,
FeatureScaling,
)
import numpy as np
import pandas as pd
import pytest
class TestFeatureScaling:
test_data_1d = [[x] for x in range(10)]
test_data_2d = [[x, (x + 1) ** 2] for x in range(10)]
test_data_3d = [[x, x + 10, (x + 1) ** 2 + x + 10] for x in range(10)]
test_data_3d_constant = [[x, 10, (x + 1) ** 2 + 10] for x in range(10)]
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array, pd.DataFrame])
def test_data_scaling_01(self, array_type):
input_array = array_type(self.test_data_1d)
output_1, output_2, output_3 = FeatureScaling.data_scaling(input_array)
expected_output_3 = np.array([[9]])
expected_output_2 = np.array([[0]])
expected_output_1 = (input_array - expected_output_2) / (
expected_output_3 - expected_output_2
)
np.testing.assert_array_equal(output_3, expected_output_3)
np.testing.assert_array_equal(output_2, expected_output_2)
np.testing.assert_array_equal(
output_1, np.array(expected_output_1).reshape(10, 1)
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array, pd.DataFrame])
def test_data_scaling_02(self, array_type):
input_array = array_type(self.test_data_2d)
output_1, output_2, output_3 = FeatureScaling.data_scaling(input_array)
expected_output_3 = np.array([[9, 100]])
expected_output_2 = np.array([[0, 1]])
expected_output_1 = (input_array - expected_output_2) / (
expected_output_3 - expected_output_2
)
np.testing.assert_array_equal(output_3, expected_output_3)
np.testing.assert_array_equal(output_2, expected_output_2)
np.testing.assert_array_equal(output_1, expected_output_1)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array, pd.DataFrame])
def test_data_scaling_03(self, array_type):
input_array = array_type(self.test_data_3d)
output_1, output_2, output_3 = FeatureScaling.data_scaling(input_array)
expected_output_3 = np.array([[9, 19, 119]])
expected_output_2 = np.array([[0, 10, 11]])
expected_output_1 = (input_array - expected_output_2) / (
expected_output_3 - expected_output_2
)
np.testing.assert_array_equal(output_3, expected_output_3)
np.testing.assert_array_equal(output_2, expected_output_2)
np.testing.assert_array_equal(output_1, expected_output_1)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array, pd.DataFrame])
def test_data_scaling_04(self, array_type):
input_array = array_type(self.test_data_3d_constant)
output_1, output_2, output_3 = FeatureScaling.data_scaling(input_array)
expected_output_3 = np.array([[9, 10, 110]])
expected_output_2 = np.array([[0, 10, 11]])
scale = expected_output_3 - expected_output_2
scale[scale == 0.0] = 1.0
expected_output_1 = (input_array - expected_output_2) / scale
np.testing.assert_array_equal(output_3, expected_output_3)
np.testing.assert_array_equal(output_2, expected_output_2)
np.testing.assert_array_equal(output_1, expected_output_1)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [list])
def test_data_scaling_05(self, array_type):
input_array = array_type(self.test_data_2d)
with pytest.raises(TypeError):
FeatureScaling.data_scaling(input_array)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_data_unscaling_01(self, array_type):
input_array = array_type(self.test_data_1d)
output_1, output_2, output_3 = FeatureScaling.data_scaling(input_array)
output_1 = output_1.reshape(
output_1.shape[0],
)
un_output_1 = FeatureScaling.data_unscaling(output_1, output_2, output_3)
np.testing.assert_array_equal(un_output_1, input_array.reshape(10, 1))
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_data_unscaling_02(self, array_type):
input_array = array_type(self.test_data_2d)
output_1, output_2, output_3 = FeatureScaling.data_scaling(input_array)
un_output_1 = FeatureScaling.data_unscaling(output_1, output_2, output_3)
np.testing.assert_array_equal(un_output_1, input_array)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_data_unscaling_03(self, array_type):
input_array = array_type(self.test_data_3d)
output_1, output_2, output_3 = FeatureScaling.data_scaling(input_array)
un_output_1 = FeatureScaling.data_unscaling(output_1, output_2, output_3)
np.testing.assert_array_equal(un_output_1, input_array)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_data_unscaling_04(self, array_type):
input_array = array_type(self.test_data_3d_constant)
output_1, output_2, output_3 = FeatureScaling.data_scaling(input_array)
un_output_1 = FeatureScaling.data_unscaling(output_1, output_2, output_3)
np.testing.assert_array_equal(un_output_1, input_array)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_data_unscaling_05(self, array_type):
input_array = array_type(self.test_data_2d)
output_1, output_2, output_3 = FeatureScaling.data_scaling(input_array)
min_array = np.array([[1]])
max_array = np.array([[5]])
with pytest.raises(IndexError):
FeatureScaling.data_unscaling(output_1, min_array, max_array)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_data_unscaling_06(self, array_type):
input_array = array_type(self.test_data_2d)
output_1, output_2, output_3 = FeatureScaling.data_scaling(input_array)
min_array = np.array([[1, 2, 3]])
max_array = np.array([[5, 6, 7]])
with pytest.raises(IndexError):
FeatureScaling.data_unscaling(output_1, min_array, max_array)
class TestPolynomialRegression:
y = np.array(
[
[i, j, ((i + 1) ** 2) + ((j + 1) ** 2)]
for i in np.linspace(0, 10, 21)
for j in np.linspace(0, 10, 21)
]
)
full_data = {"x1": y[:, 0], "x2": y[:, 1], "y": y[:, 2]}
training_data = [
[i, j, ((i + 1) ** 2) + ((j + 1) ** 2)]
for i in np.linspace(0, 10, 5)
for j in np.linspace(0, 10, 5)
]
test_data = [[i, (i + 1) ** 2] for i in range(10)]
test_data_large = [[i, (i + 1) ** 2] for i in range(200)]
test_data_1d = [[(i + 1) ** 2] for i in range(10)]
test_data_3d = [[i, (i + 1) ** 2, (i + 2) ** 2] for i in range(10)]
sample_points = [[i, (i + 1) ** 2] for i in range(8)]
sample_points_large = [[i, (i + 1) ** 2] for i in range(100)]
sample_points_1d = [[(i + 1) ** 2] for i in range(8)]
sample_points_3d = [[i, (i + 1) ** 2, (i + 2) ** 2] for i in range(8)]
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__01(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=5
)
assert PolyClass.max_polynomial_order == 5
assert (
PolyClass.number_of_crossvalidations == 3
) # Default number of cross-validations
assert PolyClass.no_adaptive_samples == 4 # Default number of adaptive samples
assert PolyClass.fraction_training == 0.75 # Default training split
assert (
PolyClass.max_fraction_training_samples == 0.5
) # Default fraction for the maximum number of training samples
assert PolyClass.max_iter == 10 # Default maximum number of iterations
assert PolyClass.solution_method == "pyomo" # Default solution_method
assert PolyClass.multinomials == 1 # Default multinomials
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
@pytest.mark.unit
def test__init__02(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
number_of_crossvalidations=5,
no_adaptive_samples=6,
training_split=0.5,
max_fraction_training_samples=0.4,
max_iter=20,
solution_method="MLe",
multinomials=0,
)
        assert PolyClass.max_polynomial_order == 3
        assert (
            PolyClass.number_of_crossvalidations == 5
        )  # User-specified number of cross-validations
        assert PolyClass.no_adaptive_samples == 6  # User-specified number of adaptive samples
        assert PolyClass.fraction_training == 0.5  # User-specified training split
        assert (
            PolyClass.max_fraction_training_samples == 0.4
        )  # User-specified fraction for the maximum number of training samples
        assert PolyClass.max_iter == 20  # User-specified maximum number of iterations
        assert (
            PolyClass.solution_method == "mle"
        )  # Solution method is lower-cased internally, so input case does not matter
        assert PolyClass.multinomials == 0  # User-specified multinomials
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [list])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__03(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(ValueError):
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=5
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [list])
def test__init__04(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(ValueError):
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=5
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__05(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points_large)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=5
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__06(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data_3d)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=5
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__07(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points_3d)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=5
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__08(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data_1d)
regression_data_input = array_type2(self.sample_points_1d)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=5
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__09(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.warns(Warning):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
number_of_crossvalidations=11,
)
        assert (
            PolyClass.number_of_crossvalidations == 11
        )  # The user-specified value is kept; only a warning is raised
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__10(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=1.2
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__11(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data_large)
regression_data_input = array_type2(self.sample_points_large)
with pytest.warns(Warning):
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=11
)
assert PolyClass.max_polynomial_order == 10
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__12(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
training_split=1,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__13(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
training_split=-1,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__14(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
max_fraction_training_samples=1.2,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__15(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
max_fraction_training_samples=-1.2,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__16(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
PolyClass = PolynomialRegression(
regression_data_input,
regression_data_input,
maximum_polynomial_order=5,
max_iter=100,
)
        assert PolyClass.max_iter == 0  # max_iter is reset to 0 when the regression data already covers the full dataset
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__17(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
no_adaptive_samples=0,
max_iter=100,
)
        assert PolyClass.max_iter == 0  # max_iter is reset to 0 when no adaptive samples are requested
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__18(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
number_of_crossvalidations=1.2,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__19(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
no_adaptive_samples=4.2,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__20(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
max_iter=4.2,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__21(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=15
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__22(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
solution_method=1,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__23(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
solution_method="idaes",
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__24(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
multinomials=3,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__25(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=-2
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__26(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
number_of_crossvalidations=-3,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__27(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
no_adaptive_samples=-3,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__28(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
max_iter=-3,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__29(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
overwrite=1,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__30(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
fname="solution.pkl",
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__31(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
fname=1,
)
@pytest.mark.unit
@pytest.fixture(scope="module")
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__32(self, array_type1, array_type2):
file_name = "sol_check.pickle"
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
PolyClass1 = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
fname=file_name,
overwrite=True,
)
PolyClass1.get_feature_vector()
results = PolyClass1.polynomial_regression_fitting()
PolyClass2 = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
fname=file_name,
overwrite=True,
)
assert PolyClass1.filename == PolyClass2.filename
@pytest.mark.unit
@pytest.fixture(scope="module")
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__33(self, array_type1, array_type2):
file_name1 = "sol_check1.pickle"
file_name2 = "sol_check2.pickle"
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
PolyClass1 = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
fname=file_name1,
overwrite=True,
)
PolyClass1.get_feature_vector()
results = PolyClass1.polynomial_regression_fitting()
PolyClass2 = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
fname=file_name2,
overwrite=True,
)
assert PolyClass1.filename == file_name1
assert PolyClass2.filename == file_name2
@pytest.mark.unit
@pytest.fixture(scope="module")
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__34(self, array_type1, array_type2):
file_name = "sol_check.pickle"
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
PolyClass1 = PolynomialRegression(
original_data_input,
regression_data_input,
fname=file_name,
maximum_polynomial_order=3,
overwrite=True,
)
PolyClass1.get_feature_vector()
results = PolyClass1.polynomial_regression_fitting()
PolygClass2 = PolynomialRegression(
original_data_input,
regression_data_input,
fname=file_name,
maximum_polynomial_order=3,
overwrite=True,
)
assert PolyClass1.filename == PolygClass2.filename
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test_training_test_data_creation_01(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
training_split=0.01,
)
with pytest.raises(Exception):
training_data, cross_val_data = PolyClass.training_test_data_creation()
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test_training_test_data_creation_02(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
training_split=0.99,
)
with pytest.raises(Exception):
training_data, cross_val_data = PolyClass.training_test_data_creation()
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
def test_training_test_data_creation_03(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=5
)
training_data, cross_val_data = PolyClass.training_test_data_creation()
expected_training_size = int(
np.around(PolyClass.number_of_samples * PolyClass.fraction_training)
)
expected_test_size = PolyClass.regression_data.shape[0] - expected_training_size
assert len(training_data) == PolyClass.number_of_crossvalidations
assert len(cross_val_data) == PolyClass.number_of_crossvalidations
for i in range(1, PolyClass.number_of_crossvalidations + 1):
assert (
training_data["training_set_" + str(i)].shape[0]
== expected_training_size
)
assert cross_val_data["test_set_" + str(i)].shape[0] == expected_test_size
concat_01 = np.concatenate(
(
training_data["training_set_" + str(i)],
cross_val_data["test_set_" + str(i)],
),
axis=0,
)
sample_data_sorted = regression_data_input[
np.lexsort(
(
regression_data_input[:, 2],
regression_data_input[:, 1],
regression_data_input[:, 0],
)
)
]
concat_01_sorted = concat_01[
np.lexsort((concat_01[:, 2], concat_01[:, 1], concat_01[:, 0]))
]
np.testing.assert_equal(sample_data_sorted, concat_01_sorted)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
def test_training_test_data_creation_04(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
number_of_crossvalidations=5,
no_adaptive_samples=6,
training_split=0.5,
max_fraction_training_samples=0.4,
max_iter=20,
solution_method="MLe",
multinomials=0,
)
training_data, cross_val_data = PolyClass.training_test_data_creation()
expected_training_size = int(
np.around(PolyClass.number_of_samples * PolyClass.fraction_training)
)
expected_test_size = PolyClass.regression_data.shape[0] - expected_training_size
assert len(training_data) == PolyClass.number_of_crossvalidations
assert len(cross_val_data) == PolyClass.number_of_crossvalidations
for i in range(1, PolyClass.number_of_crossvalidations + 1):
assert (
training_data["training_set_" + str(i)].shape[0]
== expected_training_size
)
assert cross_val_data["test_set_" + str(i)].shape[0] == expected_test_size
concat_01 = np.concatenate(
(
training_data["training_set_" + str(i)],
cross_val_data["test_set_" + str(i)],
),
axis=0,
)
sample_data_sorted = regression_data_input[
np.lexsort(
(
regression_data_input[:, 2],
regression_data_input[:, 1],
regression_data_input[:, 0],
)
)
]
concat_01_sorted = concat_01[
np.lexsort((concat_01[:, 2], concat_01[:, 1], concat_01[:, 0]))
]
np.testing.assert_equal(sample_data_sorted, concat_01_sorted)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
def test_training_test_data_creation_05(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=5
)
additional_data_input = np.array(
[
[
i**2,
((i + 1) * 2) + ((j + 1) * 2),
j**4,
((i + 1) * 2) + ((j + 1) ** 2),
]
for i in range(5)
for j in range(5)
]
)
training_data, cross_val_data = PolyClass.training_test_data_creation(
additional_features=additional_data_input
)
expected_training_size = int(
np.around(PolyClass.number_of_samples * PolyClass.fraction_training)
)
expected_test_size = PolyClass.regression_data.shape[0] - expected_training_size
assert len(training_data) == PolyClass.number_of_crossvalidations * 2
assert len(cross_val_data) == PolyClass.number_of_crossvalidations * 2
for i in range(1, PolyClass.number_of_crossvalidations + 1):
assert (
training_data["training_set_" + str(i)].shape[0]
== expected_training_size
)
assert (
training_data["training_extras_" + str(i)].shape[0]
== expected_training_size
)
assert cross_val_data["test_set_" + str(i)].shape[0] == expected_test_size
assert (
cross_val_data["test_extras_" + str(i)].shape[0] == expected_test_size
)
concat_01 = np.concatenate(
(
training_data["training_set_" + str(i)],
cross_val_data["test_set_" + str(i)],
),
axis=0,
)
sample_data_sorted = regression_data_input[
np.lexsort(
(
regression_data_input[:, 2],
regression_data_input[:, 1],
regression_data_input[:, 0],
)
)
]
concat_01_sorted = concat_01[
np.lexsort((concat_01[:, 2], concat_01[:, 1], concat_01[:, 0]))
]
np.testing.assert_equal(sample_data_sorted, concat_01_sorted)
concat_02 = np.concatenate(
(
training_data["training_extras_" + str(i)],
cross_val_data["test_extras_" + str(i)],
),
axis=0,
)
additional_data_sorted = additional_data_input[
np.lexsort(
(
additional_data_input[:, 3],
additional_data_input[:, 2],
additional_data_input[:, 1],
additional_data_input[:, 0],
)
)
]
concat_02_sorted = concat_02[
np.lexsort(
(concat_02[:, 3], concat_02[:, 2], concat_02[:, 1], concat_02[:, 0])
)
]
np.testing.assert_equal(additional_data_sorted, concat_02_sorted)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
def test_polygeneration_01(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
x_input_train_data = regression_data_input[:, :-1]
data_feed = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=1
)
poly_degree = 1
output_1 = data_feed.polygeneration(
poly_degree, data_feed.multinomials, x_input_train_data
)
expected_output_nr = x_input_train_data.shape[0]
expected_output_nc = 4 # New number of features should be = 2 * max_polynomial_order + 2 for two input features
expected_output = np.zeros((expected_output_nr, expected_output_nc))
expected_output[:, 0] = 1
expected_output[:, 1] = x_input_train_data[:, 0]
expected_output[:, 2] = x_input_train_data[:, 1]
expected_output[:, 3] = x_input_train_data[:, 0] * x_input_train_data[:, 1]
np.testing.assert_equal(output_1, expected_output)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
def test_polygeneration_02(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
x_input_train_data = regression_data_input[:, :-1]
data_feed = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=2
)
poly_degree = 2
output_1 = data_feed.polygeneration(
poly_degree, data_feed.multinomials, x_input_train_data
)
expected_output_nr = x_input_train_data.shape[0]
expected_output_nc = 6 # New number of features should be = 2 * max_polynomial_order + 2 for two input features
expected_output = np.zeros((expected_output_nr, expected_output_nc))
expected_output[:, 0] = 1
expected_output[:, 1] = x_input_train_data[:, 0]
expected_output[:, 2] = x_input_train_data[:, 1]
expected_output[:, 3] = x_input_train_data[:, 0] ** 2
expected_output[:, 4] = x_input_train_data[:, 1] ** 2
expected_output[:, 5] = x_input_train_data[:, 0] * x_input_train_data[:, 1]
np.testing.assert_equal(output_1, expected_output)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
def test_polygeneration_03(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
x_input_train_data = regression_data_input[:, :-1]
data_feed = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=10
)
poly_degree = 10
output_1 = data_feed.polygeneration(
poly_degree, data_feed.multinomials, x_input_train_data
)
expected_output_nr = x_input_train_data.shape[0]
expected_output_nc = 22 # New number of features should be = 2 * max_polynomial_order + 2 for two input features
expected_output = np.zeros((expected_output_nr, expected_output_nc))
expected_output[:, 0] = 1
expected_output[:, 1] = x_input_train_data[:, 0]
expected_output[:, 2] = x_input_train_data[:, 1]
expected_output[:, 3] = x_input_train_data[:, 0] ** 2
expected_output[:, 4] = x_input_train_data[:, 1] ** 2
expected_output[:, 5] = x_input_train_data[:, 0] ** 3
expected_output[:, 6] = x_input_train_data[:, 1] ** 3
expected_output[:, 7] = x_input_train_data[:, 0] ** 4
expected_output[:, 8] = x_input_train_data[:, 1] ** 4
expected_output[:, 9] = x_input_train_data[:, 0] ** 5
expected_output[:, 10] = x_input_train_data[:, 1] ** 5
expected_output[:, 11] = x_input_train_data[:, 0] ** 6
expected_output[:, 12] = x_input_train_data[:, 1] ** 6
expected_output[:, 13] = x_input_train_data[:, 0] ** 7
expected_output[:, 14] = x_input_train_data[:, 1] ** 7
expected_output[:, 15] = x_input_train_data[:, 0] ** 8
expected_output[:, 16] = x_input_train_data[:, 1] ** 8
expected_output[:, 17] = x_input_train_data[:, 0] ** 9
expected_output[:, 18] = x_input_train_data[:, 1] ** 9
expected_output[:, 19] = x_input_train_data[:, 0] ** 10
expected_output[:, 20] = x_input_train_data[:, 1] ** 10
expected_output[:, 21] = x_input_train_data[:, 0] * x_input_train_data[:, 1]
np.testing.assert_equal(output_1, expected_output)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
def test_polygeneration_04(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
x_input_train_data = regression_data_input[:, :-1]
data_feed = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=10,
multinomials=0,
)
poly_degree = 10
output_1 = data_feed.polygeneration(
poly_degree, data_feed.multinomials, x_input_train_data
)
expected_output_nr = x_input_train_data.shape[0]
        expected_output_nc = 21  # With multinomials=0 the bilinear term is dropped: 2 * max_polynomial_order + 1 features
expected_output = np.zeros((expected_output_nr, expected_output_nc))
expected_output[:, 0] = 1
expected_output[:, 1] = x_input_train_data[:, 0]
expected_output[:, 2] = x_input_train_data[:, 1]
expected_output[:, 3] = x_input_train_data[:, 0] ** 2
expected_output[:, 4] = x_input_train_data[:, 1] ** 2
expected_output[:, 5] = x_input_train_data[:, 0] ** 3
expected_output[:, 6] = x_input_train_data[:, 1] ** 3
expected_output[:, 7] = x_input_train_data[:, 0] ** 4
expected_output[:, 8] = x_input_train_data[:, 1] ** 4
expected_output[:, 9] = x_input_train_data[:, 0] ** 5
expected_output[:, 10] = x_input_train_data[:, 1] ** 5
expected_output[:, 11] = x_input_train_data[:, 0] ** 6
expected_output[:, 12] = x_input_train_data[:, 1] ** 6
expected_output[:, 13] = x_input_train_data[:, 0] ** 7
expected_output[:, 14] = x_input_train_data[:, 1] ** 7
expected_output[:, 15] = x_input_train_data[:, 0] ** 8
expected_output[:, 16] = x_input_train_data[:, 1] ** 8
expected_output[:, 17] = x_input_train_data[:, 0] ** 9
expected_output[:, 18] = x_input_train_data[:, 1] ** 9
expected_output[:, 19] = x_input_train_data[:, 0] ** 10
expected_output[:, 20] = x_input_train_data[:, 1] ** 10
np.testing.assert_equal(output_1, expected_output)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
def test_polygeneration_05(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
x_input_train_data = regression_data_input[:, :-1]
data_feed = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=1
)
poly_degree = 1
additional_term = np.sqrt(x_input_train_data)
output_1 = data_feed.polygeneration(
poly_degree, data_feed.multinomials, x_input_train_data, additional_term
)
expected_output_nr = x_input_train_data.shape[0]
expected_output_nc = (
6 # New number of features should be = 2 * max_polynomial_order + 4
)
expected_output = np.zeros((expected_output_nr, expected_output_nc))
expected_output[:, 0] = 1
expected_output[:, 1] = x_input_train_data[:, 0]
expected_output[:, 2] = x_input_train_data[:, 1]
expected_output[:, 3] = x_input_train_data[:, 0] * x_input_train_data[:, 1]
expected_output[:, 4] = additional_term[:, 0]
expected_output[:, 5] = additional_term[:, 1]
np.testing.assert_equal(output_1, expected_output)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_cost_function_01(self, array_type):
regression_data_input = array_type(self.training_data)
x = regression_data_input[:, :-1]
y = regression_data_input[:, -1]
x_data_nr = x.shape[0]
x_data_nc = 6
x_vector = np.zeros((x_data_nr, x_data_nc))
x_vector[:, 0] = 1
x_vector[:, 1] = x[:, 0]
x_vector[:, 2] = x[:, 1]
x_vector[:, 3] = x[:, 0] ** 2
x_vector[:, 4] = x[:, 1] ** 2
x_vector[:, 5] = x[:, 0] * x[:, 1]
theta = np.zeros((x_data_nc, 1))
expected_value = 6613.875 # Calculated externally as sum(y^2) / 2m
output_1 = PolynomialRegression.cost_function(
theta, x_vector, y, reg_parameter=0
)
assert output_1 == expected_value
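    # Quick sanity check of the "externally calculated" constant above (a sketch, not part of
    # the test suite): with theta = 0 every prediction is 0, so the cost reduces to
    # sum(y**2) / (2 * m) over the 5 x 5 training grid defined in this class:
    #
    #   y_ij = (i + 1)**2 + (j + 1)**2 for i, j in linspace(0, 10, 5), so m = 25 and
    #   sum(y_ij**2) = 330693.75  ->  330693.75 / (2 * 25) = 6613.875
    #
    # The 90.625 value in test_cost_function_02 follows the same way, using the residuals
    # (prediction - y) in place of y.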
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_cost_function_02(self, array_type):
regression_data_input = array_type(self.training_data)
x = regression_data_input[:, :-1]
y = regression_data_input[:, -1]
x_data_nr = x.shape[0]
x_data_nc = 6
x_vector = np.zeros((x_data_nr, x_data_nc))
x_vector[:, 0] = 1
x_vector[:, 1] = x[:, 0]
x_vector[:, 2] = x[:, 1]
x_vector[:, 3] = x[:, 0] ** 2
x_vector[:, 4] = x[:, 1] ** 2
x_vector[:, 5] = x[:, 0] * x[:, 1]
theta = np.array(
[[4.5], [3], [3], [1], [1], [0]]
) # coefficients in (x1 + 1.5)^2 + (x2 + 1.5) ^ 2
expected_value = 90.625 # Calculated externally as sum(dy^2) / 2m
output_1 = PolynomialRegression.cost_function(
theta, x_vector, y, reg_parameter=0
)
assert output_1 == expected_value
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_cost_function_03(self, array_type):
regression_data_input = array_type(self.training_data)
x = regression_data_input[:, :-1]
y = regression_data_input[:, -1]
x_data_nr = x.shape[0]
x_data_nc = 6
x_vector = np.zeros((x_data_nr, x_data_nc))
x_vector[:, 0] = 1
x_vector[:, 1] = x[:, 0]
x_vector[:, 2] = x[:, 1]
x_vector[:, 3] = x[:, 0] ** 2
x_vector[:, 4] = x[:, 1] ** 2
x_vector[:, 5] = x[:, 0] * x[:, 1]
theta = np.array(
[[2], [2], [2], [1], [1], [0]]
) # Actual coefficients in (x1 + 1)^2 + (x2 + 1) ^ 2
expected_value = 0 # Value should return zero for exact solution
output_1 = PolynomialRegression.cost_function(
theta, x_vector, y, reg_parameter=0
)
assert output_1 == expected_value
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_gradient_function_01(self, array_type):
regression_data_input = array_type(self.training_data)
x = regression_data_input[:, :-1]
y = regression_data_input[:, -1]
x_data_nr = x.shape[0]
x_data_nc = 6
x_vector = np.zeros((x_data_nr, x_data_nc))
x_vector[:, 0] = 1
x_vector[:, 1] = x[:, 0]
x_vector[:, 2] = x[:, 1]
x_vector[:, 3] = x[:, 0] ** 2
x_vector[:, 4] = x[:, 1] ** 2
x_vector[:, 5] = x[:, 0] * x[:, 1]
theta = np.zeros((x_data_nc,))
expected_value = np.array(
[[-97], [-635], [-635], [-5246.875], [-5246.875], [-3925]]
) # Calculated externally: see Excel sheet
expected_value = expected_value.reshape(
expected_value.shape[0],
)
output_1 = PolynomialRegression.gradient_function(
theta, x_vector, y, reg_parameter=0
)
np.testing.assert_equal(output_1, expected_value)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_gradient_function_02(self, array_type):
regression_data_input = array_type(self.training_data)
x = regression_data_input[:, :-1]
y = regression_data_input[:, -1]
x_data_nr = x.shape[0]
x_data_nc = 6
x_vector = np.zeros((x_data_nr, x_data_nc))
x_vector[:, 0] = 1
x_vector[:, 1] = x[:, 0]
x_vector[:, 2] = x[:, 1]
x_vector[:, 3] = x[:, 0] ** 2
x_vector[:, 4] = x[:, 1] ** 2
x_vector[:, 5] = x[:, 0] * x[:, 1]
theta = np.array(
[[4.5], [3], [3], [1], [1], [0]]
) # coefficients in (x1 + 1.5)^2 + (x2 + 1.5) ^ 2
theta = theta.reshape(
theta.shape[0],
)
expected_value = np.array(
[[12.5], [75], [75], [593.75], [593.75], [437.5]]
) # Calculated externally: see Excel sheet
expected_value = expected_value.reshape(
expected_value.shape[0],
)
output_1 = PolynomialRegression.gradient_function(
theta, x_vector, y, reg_parameter=0
)
np.testing.assert_equal(output_1, expected_value)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_gradient_function_03(self, array_type):
regression_data_input = array_type(self.training_data)
x = regression_data_input[:, :-1]
y = regression_data_input[:, -1]
x_data_nr = x.shape[0]
x_data_nc = 6
x_vector = np.zeros((x_data_nr, x_data_nc))
x_vector[:, 0] = 1
x_vector[:, 1] = x[:, 0]
x_vector[:, 2] = x[:, 1]
x_vector[:, 3] = x[:, 0] ** 2
x_vector[:, 4] = x[:, 1] ** 2
x_vector[:, 5] = x[:, 0] * x[:, 1]
theta = np.array(
[[2], [2], [2], [1], [1], [0]]
) # Actual coefficients in (x1 + 1)^2 + (x2 + 1) ^ 2
theta = theta.reshape(
theta.shape[0],
)
expected_value = np.array(
[[0], [0], [0], [0], [0], [0]]
) # Calculated externally: see Excel sheet
expected_value = expected_value.reshape(
expected_value.shape[0],
)
output_1 = PolynomialRegression.gradient_function(
theta, x_vector, y, reg_parameter=0
)
np.testing.assert_equal(output_1, expected_value)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array, pd.DataFrame])
def test_bfgs_parameter_optimization_01(self, array_type):
original_data_input = array_type(self.test_data)
        # Create x vector for ax^2 + bx + c: x data supplied in x_vector
input_array = np.array(
[
[0, 1],
[1, 4],
[2, 9],
[3, 16],
[4, 25],
[5, 36],
[6, 49],
[7, 64],
[8, 81],
[9, 100],
]
)
x = input_array[:, 0]
y = input_array[:, 1]
x_vector = np.zeros((x.shape[0], 3))
x_vector[:, 0] = (
x[
:,
]
** 2
)
x_vector[:, 1] = x[
:,
]
x_vector[:, 2] = 1
expected_value = np.array([[1.0], [2.0], [1.0]]).reshape(
3,
)
data_feed = PolynomialRegression(
original_data_input,
input_array,
maximum_polynomial_order=5,
solution_method="bfgs",
)
output_1 = data_feed.bfgs_parameter_optimization(x_vector, y)
assert data_feed.solution_method == "bfgs"
np.testing.assert_array_equal(expected_value, np.round(output_1, 4))
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
def test_bfgs_parameter_optimization_02(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
        # Create x vector for ax^2 + bx + c: x data supplied in x_vector
x = regression_data_input[:, :-1]
y = regression_data_input[:, -1]
x_vector = np.zeros((x.shape[0], 6))
x_vector[:, 0] = x[:, 0] ** 2
x_vector[:, 1] = x[:, 1] ** 2
x_vector[:, 2] = x[:, 0]
x_vector[:, 3] = x[:, 1]
x_vector[:, 4] = x[:, 1] * x[:, 0]
x_vector[:, 5] = 1
expected_value = np.array([[1.0], [1.0], [2.0], [2.0], [0.0], [2.0]]).reshape(
6,
)
data_feed = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=4,
solution_method="bfgs",
)
output_1 = data_feed.bfgs_parameter_optimization(x_vector, y)
assert data_feed.solution_method == "bfgs"
np.testing.assert_array_equal(expected_value, np.round(output_1, 4))
@pytest.mark.unit
def test_mle_estimate_01(self):
        # Create x vector for ax^2 + bx + c: x data supplied in x_vector
input_array = np.array(
[
[0, 1],
[1, 4],
[2, 9],
[3, 16],
[4, 25],
[5, 36],
[6, 49],
[7, 64],
[8, 81],
[9, 100],
]
)
x = input_array[:, 0]
y = input_array[:, 1]
x_vector = np.zeros((x.shape[0], 3))
x_vector[:, 0] = (
x[
:,
]
** 2
)
x_vector[:, 1] = x[
:,
]
x_vector[:, 2] = 1
expected_value = np.array([[1.0], [2.0], [1.0]]).reshape(
3,
)
output_1 = PolynomialRegression.MLE_estimate(x_vector, y)
np.testing.assert_array_equal(expected_value, np.round(output_1, 4))
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_mle_estimate_02(self, array_type):
regression_data_input = array_type(self.training_data)
x = regression_data_input[:, :-1]
y = regression_data_input[:, -1]
x_vector = np.zeros((x.shape[0], 6))
x_vector[:, 0] = x[:, 0] ** 2
x_vector[:, 1] = x[:, 1] ** 2
x_vector[:, 2] = x[:, 0]
x_vector[:, 3] = x[:, 1]
x_vector[:, 4] = x[:, 1] * x[:, 0]
x_vector[:, 5] = 1
expected_value = np.array([[1.0], [1.0], [2.0], [2.0], [0.0], [2.0]]).reshape(
6,
)
output_1 = PolynomialRegression.MLE_estimate(x_vector, y)
np.testing.assert_array_equal(expected_value, np.round(output_1, 4))
@pytest.mark.unit
def test_pyomo_optimization_01(self):
x_vector = np.array([[i**2, i, 1] for i in range(10)])
y = np.array([[i**2] for i in range(1, 11)])
expected_value = np.array([[1.0], [2.0], [1.0]])
output_1 = PolynomialRegression.pyomo_optimization(x_vector, y)
np.testing.assert_array_equal(expected_value, np.round(output_1, 4))
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_pyomo_optimization_02(self, array_type):
regression_data_input = array_type(self.training_data)
x = regression_data_input[:, :-1]
y = regression_data_input[:, -1]
x_vector = np.zeros((x.shape[0], 6))
x_vector[:, 0] = x[:, 0] ** 2
x_vector[:, 1] = x[:, 1] ** 2
x_vector[:, 2] = x[:, 0]
x_vector[:, 3] = x[:, 1]
x_vector[:, 4] = x[:, 1] * x[:, 0]
x_vector[:, 5] = 1
expected_value = np.array([[1.0], [1.0], [2.0], [2.0], [0.0], [2.0]])
output_1 = PolynomialRegression.pyomo_optimization(x_vector, y)
np.testing.assert_array_equal(expected_value, np.round(output_1, 4))
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_cross_validation_error_calculation_01(self, array_type):
regression_data_input = array_type(self.training_data)
x = regression_data_input[:, :-1]
y = regression_data_input[:, -1].reshape(regression_data_input.shape[0], 1)
x_data_nr = x.shape[0]
x_data_nc = 6
x_vector = np.zeros((x_data_nr, x_data_nc))
x_vector[:, 0] = 1
x_vector[:, 1] = x[:, 0]
x_vector[:, 2] = x[:, 1]
x_vector[:, 3] = x[:, 0] ** 2
x_vector[:, 4] = x[:, 1] ** 2
x_vector[:, 5] = x[:, 0] * x[:, 1]
theta = np.zeros((x_data_nc, 1))
expected_value = 2 * 6613.875 # Calculated externally as sum(y^2) / m
output_1 = PolynomialRegression.cross_validation_error_calculation(
theta, x_vector, y
)
assert output_1 == expected_value
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_cross_validation_error_calculation_02(self, array_type):
regression_data_input = array_type(self.training_data)
x = regression_data_input[:, :-1]
y = regression_data_input[:, -1].reshape(regression_data_input.shape[0], 1)
x_data_nr = x.shape[0]
x_data_nc = 6
x_vector = np.zeros((x_data_nr, x_data_nc))
x_vector[:, 0] = 1
x_vector[:, 1] = x[:, 0]
x_vector[:, 2] = x[:, 1]
x_vector[:, 3] = x[:, 0] ** 2
x_vector[:, 4] = x[:, 1] ** 2
x_vector[:, 5] = x[:, 0] * x[:, 1]
theta = np.array(
[[4.5], [3], [3], [1], [1], [0]]
) # coefficients in (x1 + 1.5)^2 + (x2 + 1.5) ^ 2
        expected_value = 2 * 90.625  # Calculated externally as sum(dy^2) / m
output_1 = PolynomialRegression.cross_validation_error_calculation(
theta, x_vector, y
)
assert output_1 == expected_value
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
    def test_cross_validation_error_calculation_03(self, array_type):
regression_data_input = array_type(self.training_data)
x = regression_data_input[:, :-1]
y = regression_data_input[:, -1].reshape(regression_data_input.shape[0], 1)
x_data_nr = x.shape[0]
x_data_nc = 6
x_vector = np.zeros((x_data_nr, x_data_nc))
x_vector[:, 0] = 1
x_vector[:, 1] = x[:, 0]
x_vector[:, 2] = x[:, 1]
x_vector[:, 3] = x[:, 0] ** 2
x_vector[:, 4] = x[:, 1] ** 2
x_vector[:, 5] = x[:, 0] * x[:, 1]
theta = np.array(
[[2], [2], [2], [1], [1], [0]]
) # Actual coefficients in (x1 + 1)^2 + (x2 + 1) ^ 2
expected_value = 2 * 0 # Value should return zero for exact solution
output_1 = PolynomialRegression.cross_validation_error_calculation(
theta, x_vector, y
)
assert output_1 == expected_value
def mock_optimization(self, x, y):
return 10 * np.ones((x.shape[1], 1))
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
@patch.object(PolynomialRegression, "MLE_estimate", mock_optimization)
def test_polyregression_01(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
data_feed = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
solution_method="mle",
)
poly_order = 2
training_data = regression_data_input[0:20, :]
test_data = regression_data_input[20:, :]
expected_output = 10 * np.ones((6, 1))
output_1, _, _ = data_feed.polyregression(poly_order, training_data, test_data)
np.testing.assert_array_equal(expected_output, output_1)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
@patch.object(
PolynomialRegression, "bfgs_parameter_optimization", mock_optimization
)
def test_polyregression_02(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
data_feed = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
solution_method="bfgs",
)
poly_order = 2
training_data = regression_data_input[0:20, :]
test_data = regression_data_input[20:, :]
expected_output = 10 * np.ones((6, 1))
output_1, _, _ = data_feed.polyregression(poly_order, training_data, test_data)
np.testing.assert_array_equal(expected_output, output_1)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
@patch.object(PolynomialRegression, "pyomo_optimization", mock_optimization)
def test_polyregression_03(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
data_feed = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=5
)
poly_order = 2
training_data = regression_data_input[0:20, :]
test_data = regression_data_input[20:, :]
expected_output = 10 * np.ones((6, 1))
output_1, _, _ = data_feed.polyregression(poly_order, training_data, test_data)
np.testing.assert_array_equal(expected_output, output_1)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
def test_polyregression_04(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
data_feed = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=5
)
poly_order = 10
training_data = regression_data_input[0:20, :]
test_data = regression_data_input[20:, :]
expected_output = np.Inf
output_1, output_2, output_3 = data_feed.polyregression(
poly_order, training_data, test_data
)
np.testing.assert_array_equal(expected_output, output_1)
np.testing.assert_array_equal(expected_output, output_2)
np.testing.assert_array_equal(expected_output, output_3)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array, pd.DataFrame])
def test_surrogate_performance_01(self, array_type):
original_data_input = array_type(self.test_data)
input_array = np.array(
[
[0, 1],
[1, 4],
[2, 9],
[3, 16],
[4, 25],
[5, 36],
[6, 49],
[7, 64],
[8, 81],
[9, 100],
]
)
order_best = 2
phi_best = np.array([[0.0], [0.0], [0.0]])
expected_value_1 = 38.5
expected_value_2 = 2533.3
expected_value_3 = -1.410256
expected_value_4 = 0
data_feed = PolynomialRegression(
original_data_input, input_array, maximum_polynomial_order=5
)
_, output_1, output_2, output_3, output_4 = data_feed.surrogate_performance(
phi_best, order_best
)
assert output_1 == expected_value_1
assert output_2 == expected_value_2
assert np.round(output_3, 4) == np.round(expected_value_3, 4)
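# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test suite): the expected
# coefficient vector [1, 1, 2, 2, 0, 2] checked above is simply the
# least-squares fit of y = (x1 + 1)^2 + (x2 + 1)^2 over the basis
# [x1^2, x2^2, x1, x2, x1*x2, 1].  The helper below reproduces that vector
# with plain numpy, independently of the PolynomialRegression internals;
# the random sample grid is an assumption made only for this sketch.
def _lstsq_reference_fit():
    import numpy as np
    rng = np.random.default_rng(0)
    x = rng.uniform(-2.0, 2.0, size=(50, 2))
    y = (x[:, 0] + 1.0) ** 2 + (x[:, 1] + 1.0) ** 2
    design = np.column_stack(
        [x[:, 0] ** 2, x[:, 1] ** 2, x[:, 0], x[:, 1],
         x[:, 0] * x[:, 1], np.ones(len(x))]
    )
    theta, *_ = np.linalg.lstsq(design, y, rcond=None)
    return np.round(theta, 4)  # approximately [1., 1., 2., 2., 0., 2.]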
# encoding: utf8
import unittest
from dynd import nd
import numpy as np
import bcolz
import tables as tb
from datashape import dshape
from datetime import datetime
import os
from blaze.compute.chunks import ChunkIterator, chunks
import pandas as pd
from pandas import DataFrame
from blaze.data import CSV
from blaze.api.into import into, discover
from blaze import Data
from blaze.data import Concat
from blaze.utils import tmpfile, filetext, example
from blaze.pytables import PyTables
import pytest
class TestInto(unittest.TestCase):
def test_containers(self):
self.assertEqual(into([], (1, 2, 3)), [1, 2, 3])
self.assertEqual(into([], iter((1, 2, 3))),
[1, 2, 3])
self.assertEqual(into((), (1, 2, 3)),
(1, 2, 3))
self.assertEqual(into({}, [(1, 2), (3, 4)]),
{1: 2, 3: 4})
self.assertEqual(into((), {1: 2, 3: 4}),
((1, 2), (3, 4)))
self.assertEqual(into((), {'cat': 2, 'dog': 4}),
(('cat', 2), ('dog', 4)))
def test_dynd(self):
self.assertEqual(nd.as_py(into(nd.array(), (1, 2, 3))),
nd.as_py(nd.array([1, 2, 3])))
self.assertEqual(into([], nd.array([1, 2])),
[1, 2])
self.assertEqual(into([], nd.array([[1, 2], [3, 4]])),
[[1, 2], [3, 4]])
def test_numpy(self):
assert (into(np.array(0), [1, 2]) == np.array([1, 2])).all()
assert (into(np.array(0), iter([1, 2])) == np.array([1, 2])).all()
self.assertEqual(into([], np.array([1, 2])),
[1, 2])
def test_numpy_datetime(self):
assert isinstance(into(np.ndarray(0), [datetime(2014, 1, 1)])[0],
np.datetime64)
def test_type(self):
self.assertEqual(into(list, (1, 2, 3)),
into([], (1, 2, 3)))
self.assertEqual(str(into(np.ndarray, (1, 2, 3))),
str(into(np.ndarray(()), (1, 2, 3))))
@pytest.yield_fixture
def h5():
pytest.importorskip('tables')
from tables import IsDescription, UInt8Col, StringCol, open_file
class Test(IsDescription):
posted_dow = UInt8Col(pos=0)
jobtype = UInt8Col(pos=1)
location = UInt8Col(pos=2)
date = StringCol(20, pos=3)
country = StringCol(2, pos=4)
with tmpfile('.h5') as filename:
h5file = open_file(filename, mode="w", title="Test file")
group = h5file.create_group("/", 'test', 'Info')
tab = h5file.create_table(group, 'sample', Test, "Example")
# Insert a new record
tab.append([(3, 1, 0, '20121105', 'ab')])
tab.flush()
yield h5file
h5file.close()
data = [('Alice', 100), ('Bob', 200)]
schema = '{name: string, amount: int}'
data_table = Data(data, '2 * {name: string, amount: int}')
@pytest.fixture
def cds():
pytest.importorskip('bokeh')
from bokeh.objects import ColumnDataSource
cds = ColumnDataSource({
'id': [1, 2, 3],
'name': ['Alice', 'Bob', 'Charlie'],
'amount': [100, 200, 300],
'timestamp': [datetime(2000, 12, 25, 0, 0, 1),
datetime(2001, 12, 25, 0, 0, 1),
datetime(2002, 12, 25, 0, 0, 1)]
})
return cds
@pytest.yield_fixture
def good_csv():
with tmpfile(".csv") as filename:
with open(filename, mode='w') as f:
# Insert a new record
f.write("userid,text,country\n")
f.write("1,Alice,az\n")
f.write("2,Bob,bl\n")
f.write("3,Charlie,cz\n")
yield filename
@pytest.yield_fixture
def bad_csv_df():
with tmpfile(".csv") as filename:
with open(filename, mode='w') as badfile:
# Insert a new record
badfile.write("userid,text,country\n")
badfile.write("1,Alice,az\n")
badfile.write("2,Bob,bl\n")
for i in range(100):
badfile.write("%d,badguy,zz\n" % i)
badfile.write("4,Dan,gb,extra,extra\n")
yield filename
@pytest.yield_fixture
def out_hdf5():
pytest.importorskip('tables')
with tmpfile(".h5") as filename:
yield filename
@pytest.yield_fixture
def out_hdf5_alt():
pytest.importorskip('tables')
with tmpfile(".h5") as filename:
yield filename
class A(object): pass
class B(object): pass
def test_into_fails():
with pytest.raises(NotImplementedError):
into(A(), B())
def test_into_pytables_dataframe(h5):
samp = h5.root.test.sample
final = into(pd.DataFrame, samp)
assert len(final) == 1
def test_pandas_dynd():
arr = nd.array(data, dtype=schema)
result = into(DataFrame, arr)
expected = DataFrame(data, columns=['name', 'amount'])
assert str(result) == str(expected)
nda = nd.array([[1,2,3], [4,5,6], [7,8,9]])
csv = CSV(example('accounts.csv'))
df_csv = into(DataFrame, csv)
df_nd = into(df_csv, nda)
df_no_names = into(DataFrame, nda)
assert list(df_nd.columns) == list(df_csv.columns)
assert list(df_no_names.columns) == [0,1,2]
def test_pandas_numpy():
dtype = [('name', 'O'), ('amount', int)]
x = np.array(data, dtype=dtype)
result = into(DataFrame(), x)
expected = DataFrame(data, columns=['name', 'amount'])
assert str(result) == str(expected)
result = into(DataFrame(columns=['name', 'amount']), x)
expected = DataFrame(data, columns=['name', 'amount'])
assert str(result) == str(expected)
def test_pandas_seq():
assert str(into(DataFrame, [1, 2])) == str(DataFrame([1, 2]))
assert str(into(DataFrame, (1, 2))) == str(DataFrame([1, 2]))
assert (str(into(DataFrame(columns=['a', 'b']), [(1, 2), (3, 4)])) ==
str(DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])))
def test_pandas_pandas():
df = DataFrame(data, columns=['name', 'balance'])
new_df = into(DataFrame, df)
# Data must be the same
assert np.all(new_df == df)
# new_df should be a copy of df
assert id(new_df) != id(df)
def test_DataFrame_Series():
df = DataFrame(data, columns=['name', 'balance'])
new_df = into(DataFrame, df['name'])
assert np.all(new_df == DataFrame([['Alice'], ['Bob']], columns=['name']))
# new_df should be a copy of df
assert id(new_df) != id(df['name'])
assert isinstance(new_df, DataFrame)
def test_discover_ndarray():
arr = nd.array(data, dtype=schema)
assert discover(arr) == 2 * dshape(schema)
def test_discover_pandas():
df = DataFrame(data, columns=['name', 'balance'])
assert discover(df).subshape[0] == dshape(schema)
def test_into_dynd_from_pandas():
df = DataFrame(data, columns=['name', 'balance'])
result = into(nd.array, df)
assert nd.as_py(result, tuple=True) == data
def test_into_table_dataframe():
t = data_table
assert list(into(DataFrame(), t).columns) == list(t.fields)
assert into([], into(DataFrame(), t)) == list(map(tuple, data))
def test_Column_data_source():
pytest.importorskip('bokeh')
from bokeh.objects import ColumnDataSource
cds = into(ColumnDataSource(), data_table)
assert isinstance(cds, ColumnDataSource)
assert set(cds.column_names) == set(data_table.fields)
def test_into_ColumnDataSource_pytables():
pytest.importorskip('bokeh')
from bokeh.objects import ColumnDataSource
pyt = PyTables(example('accounts.h5'), '/accounts')
cds = into(ColumnDataSource, pyt)
assert all(name in cds.column_names for name in ('balance', 'id', 'name'))
def test_numpy_list():
dtype = into(np.ndarray, data).dtype
assert np.issubdtype(dtype[0], object)
assert np.issubdtype(dtype[1], int)
assert into([], into(np.ndarray, data)) == data
def test_numpy_table_expr():
t = Data(data, 'var * {name: string, amount: int64}')
assert (into(np.ndarray, t).dtype ==
        np.dtype([('name', 'O'), ('amount', 'i8')]))
#!/usr/bin/env python
# coding=utf-8
"""
Basic visualization functions
| Option | Description |
| ------ | ----------- |
| title | viz.py |
| authors | <NAME>, <NAME>, <NAME>, <NAME> |
| date | 2020-03-18 |
"""
from copy import copy
import os
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import mne
import meshio
def transform(locs: np.ndarray,traX: float=0.15, traY: float=0, traZ: float=0.5, rotY: float=(np.pi)/2, rotZ: float=(np.pi)/2) -> np.ndarray:
"""
Calculates new 3D locations for the EEG sensors.
Arguments:
locs: array of shape (n_sensors, 3)
3d coordinates of the sensors
traX: float
X translation to apply to the sensors
traY: float
Y translation to apply to the sensors
traZ: float
Z translation to apply to the sensors
rotY: float
Y rotation to apply to the sensors
rotZ: float
Z rotation to apply to the sensors
Returns:
result: array (n_sensors, 3)
new 3d coordinates of the sensors
"""
# Z rotation
newX = locs[:, 0] * np.cos(rotZ) - locs[:, 1] * np.sin(rotZ)
newY = locs[:, 0] * np.sin(rotZ) + locs[:, 1] * np.cos(rotZ)
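# ---------------------------------------------------------------------------
# The original transform() body is truncated above, so the following is a
# self-contained sketch of the behaviour described in its docstring
# (Z rotation, then Y rotation, then translation), assuming the standard
# right-handed rotation conventions.  The exact order and sign conventions
# used by the library may differ; this is for illustration only.
def _transform_sketch(locs: np.ndarray, traX: float = 0.15, traY: float = 0.0,
                      traZ: float = 0.5, rotY: float = np.pi / 2,
                      rotZ: float = np.pi / 2) -> np.ndarray:
    x, y, z = locs[:, 0], locs[:, 1], locs[:, 2]
    # rotation about the Z axis
    x1 = x * np.cos(rotZ) - y * np.sin(rotZ)
    y1 = x * np.sin(rotZ) + y * np.cos(rotZ)
    # rotation about the Y axis
    x2 = x1 * np.cos(rotY) + z * np.sin(rotY)
    z2 = -x1 * np.sin(rotY) + z * np.cos(rotY)
    # translation
    return np.column_stack([x2 + traX, y1 + traY, z2 + traZ])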
import numpy
import pytest
from numpy.testing import assert_array_almost_equal_nulp
from shapely.geometry import Point
from generator import geometry, optimal_path
def test_dilate_and_unify_object_bounds():
bounds = [
{'x': -1.0, 'z': -1.0},
{'x': -1.0, 'z': 1.0},
{'x': 1.0, 'z': 1.0},
{'x': 1.0, 'z': -1.0}
]
output = optimal_path._dilate_and_unify_object_bounds(
[bounds],
0.5,
Point(0, -4),
Point(0, 4)
)
assert_array_almost_equal_nulp(numpy.array(output), numpy.array([[
(-1.0, -1.5), (-1.5, -1.0), (-1.5, 1.0), (-1.0, 1.5), (1.0, 1.5),
(1.5, 1.0), (1.5, -1.0), (1.0, -1.5)
]]))
output = optimal_path._dilate_and_unify_object_bounds(
[bounds],
1,
Point(0, -4),
Point(0, 4)
)
assert_array_almost_equal_nulp(numpy.array(output), numpy.array([[
(-1.0, -2), (-2, -1.0), (-2, 1.0), (-1.0, 2), (1.0, 2),
(2, 1.0), (2, -1.0), (1.0, -2)
]]))
# Will not dilate if source is inside bounds.
output = optimal_path._dilate_and_unify_object_bounds(
[bounds],
0.5,
Point(0, -1.25),
Point(0, 4)
)
assert_array_almost_equal_nulp(numpy.array(output), numpy.array([[
(-1.0, -1.0), (-1.0, 1.0), (1.0, 1.0), (1.0, -1.0)
]]))
# Will not dilate if target is inside bounds.
output = optimal_path._dilate_and_unify_object_bounds(
[bounds],
0.5,
Point(0, -4),
Point(0, 1.25)
)
assert_array_almost_equal_nulp(numpy.array(output), numpy.array([[
(-1.0, -1.0), (-1.0, 1.0), (1.0, 1.0), (1.0, -1.0)
]]))
def test_dilate_and_unify_object_bounds_multiple_poly():
bounds_1 = [
{'x': -1.0, 'z': -1.0},
{'x': -1.0, 'z': 1.0},
{'x': 1.0, 'z': 1.0},
{'x': 1.0, 'z': -1.0}
]
bounds_2 = [
{'x': -4.0, 'z': -1.0},
{'x': -4.0, 'z': 1.0},
{'x': -3.0, 'z': 1.0},
{'x': -3.0, 'z': -1.0}
]
bounds_3 = [
{'x': 3.0, 'z': -1.0},
{'x': 3.0, 'z': 1.0},
{'x': 4.0, 'z': 1.0},
{'x': 4.0, 'z': -1.0}
]
output = optimal_path._dilate_and_unify_object_bounds(
[bounds_1, bounds_2, bounds_3],
0.5,
Point(0, -4),
Point(0, 4)
)
assert len(output) == 3
assert_array_almost_equal_nulp(numpy.array(output[0]), numpy.array([
(-4.0, -1.5), (-4.5, -1.0), (-4.5, 1.0), (-4.0, 1.5), (-3.0, 1.5),
(-2.5, 1.0), (-2.5, -1.0), (-3.0, -1.5)
]))
assert_array_almost_equal_nulp(numpy.array(output[1]), numpy.array([
(3.0, -1.5), (2.5, -1.0), (2.5, 1.0), (3.0, 1.5), (4.0, 1.5),
(4.5, 1.0), (4.5, -1.0), (4.0, -1.5)
]))
assert_array_almost_equal_nulp(numpy.array(output[2]), numpy.array([
(-1.0, -1.5), (-1.5, -1.0), (-1.5, 1.0), (-1.0, 1.5), (1.0, 1.5),
(1.5, 1.0), (1.5, -1.0), (1.0, -1.5)
]))
bounds_4 = [
{'x': 1.0, 'z': -1.0},
{'x': 1.0, 'z': 1.0},
{'x': 3.0, 'z': 1.0},
{'x': 3.0, 'z': -1.0}
]
output = optimal_path._dilate_and_unify_object_bounds(
[bounds_1, bounds_2, bounds_3, bounds_4],
0.5,
Point(0, -4),
Point(0, 4)
)
assert len(output) == 2
assert_array_almost_equal_nulp(numpy.array(output[0]), numpy.array([
(-1.0, -1.5), (-1.5, -1.0), (-1.5, 1.0), (-1.0, 1.5), (1.0, 1.5),
(3.0, 1.5), (4.0, 1.5), (4.5, 1.0), (4.5, -1.0), (4.0, -1.5),
(3.0, -1.5), (1.0, -1.5)
]))
assert_array_almost_equal_nulp(numpy.array(output[1]), numpy.array([
(-4.0, -1.5), (-4.5, -1.0), (-4.5, 1.0), (-4.0, 1.5), (-3.0, 1.5),
(-2.5, 1.0), (-2.5, -1.0), (-3.0, -1.5)
]))
def test_dilate_target_bounds():
output = optimal_path._dilate_target_bounds([
{'x': -1.0, 'z': -1.0},
{'x': -1.0, 'z': 1.0},
{'x': 1.0, 'z': 1.0},
{'x': 1.0, 'z': -1.0}
])
assert_array_almost_equal_nulp(numpy.array(output),
from __future__ import print_function
from pyorbit.classes.common import *
from pyorbit.classes.model_container_multinest import ModelContainerMultiNest
from pyorbit.classes.model_container_polychord import ModelContainerPolyChord
from pyorbit.classes.model_container_emcee import ModelContainerEmcee
from pyorbit.classes.input_parser import pars_input
from pyorbit.classes.io_subroutines import *
import numpy as np
import os
import matplotlib as mpl
import sys
mpl.use('Agg')
from matplotlib import pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.ticker import AutoMinorLocator
import corner
import pyorbit.classes.kepler_exo as kepler_exo
import pyorbit.classes.common as common
import pyorbit.classes.results_analysis as results_analysis
import h5py
import csv
import re
__all__ = ["pyorbit_getresults"]
def pyorbit_getresults(config_in, sampler, plot_dictionary):
try:
use_tex = config_in['parameters']['use_tex']
except:
use_tex = True
if use_tex is False:
print(' LaTeX disabled')
if plot_dictionary['use_getdist']:
from getdist import plots, MCSamples
# plt.rc('font', **{'family': 'serif', 'serif': ['Computer Modern Roman']})
plt.rcParams["font.family"] = "Times New Roman"
plt.rc('text', usetex=use_tex)
sample_keyword = {
'multinest': ['multinest', 'MultiNest', 'multi'],
'polychord': ['polychord', 'PolyChord', 'polychrod', 'poly'],
'emcee': ['emcee', 'MCMC', 'Emcee']
}
if sampler in sample_keyword['emcee']:
dir_input = './' + config_in['output'] + '/emcee/'
dir_output = './' + config_in['output'] + '/emcee_plot/'
os.system('mkdir -p ' + dir_output)
mc, starting_point, population, prob, state, \
sampler_chain, sampler_lnprobability, sampler_acceptance_fraction, _ = \
emcee_load_from_cpickle(dir_input)
pars_input(config_in, mc, reload_emcee=True)
if hasattr(mc.emcee_parameters, 'version'):
emcee_version = mc.emcee_parameters['version'][0]
else:
import emcee
emcee_version = emcee.__version__[0]
mc.model_setup()
""" Required to create the right objects inside each class - if defined inside """
theta_dictionary = results_analysis.get_theta_dictionary(mc)
nburnin = int(mc.emcee_parameters['nburn'])
nthin = int(mc.emcee_parameters['thin'])
nsteps = int(sampler_chain.shape[1] * nthin)
flat_chain = emcee_flatchain(sampler_chain, nburnin, nthin)
flat_lnprob = emcee_flatlnprob(sampler_lnprobability, nburnin, nthin, emcee_version)
flat_BiC = -2 * flat_lnprob + mc.ndim * np.log(mc.ndata)
lnprob_med = common.compute_value_sigma(flat_lnprob)
chain_med = common.compute_value_sigma(flat_chain)
chain_MAP, lnprob_MAP = common.pick_MAP_parameters(flat_chain, flat_lnprob)
n_samplings, n_pams = np.shape(flat_chain)
print()
print('emcee version: ', emcee.__version__)
if mc.emcee_parameters['version'] == '2':
print('WARNING: upgrading to version 3 is strongly advised')
print()
print(' Reference Time Tref: {}'.format(mc.Tref))
print()
print(' Dimensions = {}'.format(mc.ndim))
print(' Nwalkers = {}'.format(mc.emcee_parameters['nwalkers']))
print()
print(' Steps: {}'.format(nsteps))
results_analysis.print_integrated_ACF(sampler_chain, theta_dictionary, nthin)
if sampler in sample_keyword['multinest']:
plot_dictionary['lnprob_chain'] = False
plot_dictionary['chains'] = False
plot_dictionary['traces'] = False
dir_input = './' + config_in['output'] + '/multinest/'
dir_output = './' + config_in['output'] + '/multinest_plot/'
os.system('mkdir -p ' + dir_output)
mc = nested_sampling_load_from_cpickle(dir_input)
mc.model_setup()
mc.initialize_logchi2()
results_analysis.results_resumen(mc, None, skip_theta=True)
""" Required to create the right objects inside each class - if defined inside """
theta_dictionary = results_analysis.get_theta_dictionary(mc)
data_in = np.genfromtxt(dir_input + 'post_equal_weights.dat')
flat_lnprob = data_in[:, -1]
flat_chain = data_in[:, :-1]
# nsample = np.size(flat_lnprob)
n_samplings, n_pams = np.shape(flat_chain)
lnprob_med = common.compute_value_sigma(flat_lnprob)
chain_med = common.compute_value_sigma(flat_chain)
chain_MAP, lnprob_MAP = common.pick_MAP_parameters(flat_chain, flat_lnprob)
print()
print(' Reference Time Tref: {}'.format(mc.Tref))
print()
print(' Dimensions: {}'.format(mc.ndim))
print()
print(' Samples: {}'.format(n_samplings))
if sampler in sample_keyword['polychord']:
plot_dictionary['lnprob_chain'] = False
plot_dictionary['chains'] = False
plot_dictionary['traces'] = False
dir_input = './' + config_in['output'] + '/polychord/'
dir_output = './' + config_in['output'] + '/polychord_plot/'
os.system('mkdir -p ' + dir_output)
mc = nested_sampling_load_from_cpickle(dir_input)
# pars_input(config_in, mc)
mc.model_setup()
mc.initialize_logchi2()
results_analysis.results_resumen(mc, None, skip_theta=True)
""" Required to create the right objects inside each class - if defined inside """
theta_dictionary = results_analysis.get_theta_dictionary(mc)
data_in = np.genfromtxt(dir_input + 'pyorbit_equal_weights.txt')
flat_lnprob = data_in[:, 1]
flat_chain = data_in[:, 2:]
# nsample = np.size(flat_lnprob)
n_samplings, n_pams = np.shape(flat_chain)
lnprob_med = common.compute_value_sigma(flat_lnprob)
chain_med = common.compute_value_sigma(flat_chain)
chain_MAP, lnprob_MAP = common.pick_MAP_parameters(flat_chain, flat_lnprob)
print()
print(' Reference Time Tref: {}'.format(mc.Tref))
print()
print(' Dimensions: {}'.format(mc.ndim))
print()
print(' Samples: {}'.format(n_samplings))
print()
print(' LN posterior: {0:12f} {1:12f} {2:12f} (15-84 p) '.format(lnprob_med[0], lnprob_med[2], lnprob_med[1]))
MAP_log_priors, MAP_log_likelihood = mc.log_priors_likelihood(chain_MAP)
BIC = -2.0 * MAP_log_likelihood + np.log(mc.ndata) * mc.ndim
AIC = -2.0 * MAP_log_likelihood + 2.0 * mc.ndim
AICc = AIC + (2.0 + 2.0 * mc.ndim) * mc.ndim / (mc.ndata - mc.ndim - 1.0)
# AICc for small sample
print()
print(' MAP log_priors = {}'.format(MAP_log_priors))
print(' MAP log_likelihood = {}'.format(MAP_log_likelihood))
print(' MAP BIC (using likelihood) = {}'.format(BIC))
print(' MAP AIC (using likelihood) = {}'.format(AIC))
print(' MAP AICc (using likelihood) = {}'.format(AICc))
MAP_log_posterior = MAP_log_likelihood + MAP_log_priors
BIC = -2.0 * MAP_log_posterior + np.log(mc.ndata) * mc.ndim
AIC = -2.0 * MAP_log_posterior + 2.0 * mc.ndim
AICc = AIC + (2.0 + 2.0 * mc.ndim) * mc.ndim / (mc.ndata - mc.ndim - 1.0)
print()
print(' MAP BIC (using posterior) = {}'.format(BIC))
print(' MAP AIC (using posterior) = {}'.format(AIC))
print(' MAP AICc (using posterior) = {}'.format(AICc))
if mc.ndata < 40 * mc.ndim:
print()
print(' AICc suggested over AIC because NDATA ( {0:12f} ) < 40 * NDIM ( {1:12f} )'.format(mc.ndata, mc.ndim))
else:
print()
print(' AIC suggested over AICs because NDATA ( {0:12f} ) > 40 * NDIM ( {1:12f} )'.format(mc.ndata, mc.ndim))
print()
print('****************************************************************************************************')
print('****************************************************************************************************')
print()
print(' Confidence intervals (median value, 34.135th percentile from the median on the left and right side)')
planet_variables = results_analysis.results_resumen(mc, flat_chain, chain_med=chain_MAP, return_samples=True)
print()
print('****************************************************************************************************')
print()
print(' Parameters corresponding to the Maximum a Posteriori probability ( {} )'.format(lnprob_MAP))
print()
results_analysis.results_resumen(mc, chain_MAP)
print()
print('****************************************************************************************************')
print()
# Computation of all the planetary variables
planet_variables_med = results_analysis.get_planet_variables(mc, chain_med[:, 0])
star_variables = results_analysis.get_stellar_parameters(mc, chain_med[:, 0])
planet_variables_MAP = results_analysis.get_planet_variables(mc, chain_MAP)
star_variables_MAP = results_analysis.get_stellar_parameters(mc, chain_MAP)
if plot_dictionary['lnprob_chain'] or plot_dictionary['chains']:
print(' Plot FLAT chain ')
if emcee_version == '2':
fig = plt.figure(figsize=(12, 12))
plt.xlabel(r'$\ln \mathcal{L}$')
plt.plot(sampler_lnprobability.T, '-', alpha=0.5)
plt.axhline(lnprob_med[0])
plt.axvline(nburnin / nthin, c='r')
plt.savefig(dir_output + 'LNprob_chain.png', bbox_inches='tight', dpi=300)
plt.close(fig)
else:
fig = plt.figure(figsize=(12, 12))
plt.xlabel(r'$\ln \mathcal{L}$')
plt.plot(sampler_lnprobability, '-', alpha=0.5)
plt.axhline(lnprob_med[0])
plt.axvline(nburnin / nthin, c='r')
plt.savefig(dir_output + 'LNprob_chain.png', bbox_inches='tight', dpi=300)
plt.close(fig)
print()
print('****************************************************************************************************')
print()
if plot_dictionary['full_correlation']:
corner_plot = {
'samples': np.zeros([np.size(flat_chain, axis=0), np.size(flat_chain, axis=1) + 1]),
'labels': [],
'truths': []
}
i_corner = 0
for var, var_dict in theta_dictionary.items():
corner_plot['samples'][:, i_corner] = flat_chain[:, var_dict]
corner_plot['labels'].append(re.sub('_', '-', var))
corner_plot['truths'].append(chain_med[var_dict, 0])
i_corner += 1
corner_plot['samples'][:, -1] = flat_lnprob[:]
corner_plot['labels'].append('ln-prob')
corner_plot['truths'].append(lnprob_med[0])
if plot_dictionary['use_getdist']:
print(' Plotting full_correlation plot with GetDist')
print()
print(' Ignore the no burn in error warning from getdist')
print(' since burn in has been already removed from the chains')
plt.rc('text', usetex=False)
samples = MCSamples(samples=corner_plot['samples'], names=corner_plot['labels'],
labels=corner_plot['labels'])
g = plots.getSubplotPlotter()
g.settings.num_plot_contours = 6
g.triangle_plot(samples, filled=True)
g.export(dir_output + "all_internal_variables_corner_getdist.pdf")
print()
else:
# plotting mega-corner plot
print('Plotting full_correlation plot with Corner')
plt.rc('text', usetex=False)
fig = corner.corner(corner_plot['samples'], labels=corner_plot['labels'], truths=corner_plot['truths'])
fig.savefig(dir_output + "all_internal_variables_corner_dfm.pdf", bbox_inches='tight', dpi=300)
plt.close(fig)
plt.rc('text', usetex=use_tex)
print()
print('****************************************************************************************************')
print()
if plot_dictionary['chains']:
print(' Plotting the chains... ')
os.system('mkdir -p ' + dir_output + 'chains')
for theta_name, ii in theta_dictionary.items():
file_name = dir_output + 'chains/' + repr(ii) + '_' + theta_name + '.png'
fig = plt.figure(figsize=(12, 12))
plt.plot(sampler_chain[:, :, ii].T, '-', alpha=0.5)
plt.axvline(nburnin / nthin, c='r')
plt.savefig(file_name, bbox_inches='tight', dpi=300)
plt.close(fig)
print()
print('****************************************************************************************************')
print()
if plot_dictionary['traces']:
print(' Plotting the Gelman-Rubin traces... ')
print()
"""
Gelman-Rubin traces are stored in a dedicated folder inside the _plot folder.
Note that the GR statistic is not robust here because the walkers are not independent.
"""
os.system('mkdir -p ' + dir_output + 'gr_traces')
step_sampling = np.arange(nburnin / nthin, nsteps / nthin, 1, dtype=int)
for theta_name, th in theta_dictionary.items():
rhat = np.array([GelmanRubin_v2(sampler_chain[:, :steps, th]) for steps in step_sampling])
print(' Gelman-Rubin: {0:5d} {1:12f} {2:s} '.format(th, rhat[-1], theta_name))
file_name = dir_output + 'gr_traces/v2_' + repr(th) + '_' + theta_name + '.png'
fig = plt.figure(figsize=(12, 12))
plt.plot(step_sampling, rhat[:], '-', color='k')
plt.axhline(1.01, c='C0')
plt.savefig(file_name, bbox_inches='tight', dpi=300)
plt.close(fig)
print()
print('****************************************************************************************************')
print()
if plot_dictionary['common_corner']:
print(' Plotting the common models corner plots')
plt.rc('text', usetex=False)
for common_name, common_model in mc.common_models.items():
print(' Common model: ', common_name)
corner_plot = {
'var_list': [],
'samples': [],
'labels': [],
'truths': []
}
variable_values = common_model.convert(flat_chain)
variable_median = common_model.convert(chain_med[:, 0])
if len(variable_median) < 1.:
continue
"""
Check if the eccentricity and argument of pericenter were set as free parameters or fixed by simply
checking the size of their distribution
"""
for var in variable_values.keys():
if np.size(variable_values[var]) == 1:
variable_values[var] = variable_values[var] * np.ones(n_samplings)
else:
corner_plot['var_list'].append(var)
corner_plot['samples'] = []
corner_plot['labels'] = []
corner_plot['truths'] = []
for var_i, var in enumerate(corner_plot['var_list']):
corner_plot['samples'].extend([variable_values[var]])
corner_plot['labels'].append(var)
corner_plot['truths'].append(variable_median[var])
""" Check if the semi-amplitude K is among the parameters that have been fitted.
If so, it computes the corresponding planetary mass with its uncertainty """
fig = corner.corner(np.asarray(corner_plot['samples']).T, labels=corner_plot['labels'],
truths=corner_plot['truths'])
fig.savefig(dir_output + common_name + "_corners.pdf", bbox_inches='tight', dpi=300)
plt.close(fig)
print()
print('****************************************************************************************************')
print()
if plot_dictionary['dataset_corner']:
print(' Dataset + models corner plots ')
print()
for dataset_name, dataset in mc.dataset_dict.items():
for model_name in dataset.models:
variable_values = dataset.convert(flat_chain)
variable_median = dataset.convert(chain_med[:, 0])
for common_ref in mc.models[model_name].common_ref:
variable_values.update(mc.common_models[common_ref].convert(flat_chain))
variable_median.update(mc.common_models[common_ref].convert(chain_med[:, 0]))
variable_values.update(mc.models[model_name].convert(flat_chain, dataset_name))
variable_median.update(mc.models[model_name].convert(chain_med[:, 0], dataset_name))
corner_plot['samples'] = []
corner_plot['labels'] = []
corner_plot['truths'] = []
for var_i, var in enumerate(variable_values):
if np.size(variable_values[var]) <= 1: continue
corner_plot['samples'].extend([variable_values[var]])
corner_plot['labels'].append(var)
corner_plot['truths'].append(variable_median[var])
fig = corner.corner(np.asarray(corner_plot['samples']).T,
labels=corner_plot['labels'], truths=corner_plot['truths'])
fig.savefig(dir_output + dataset_name + '_' + model_name + "_corners.pdf", bbox_inches='tight', dpi=300)
plt.close(fig)
print(' Dataset: ', dataset_name, ' model: ', model_name, ' corner plot done ')
print()
print('****************************************************************************************************')
print()
if plot_dictionary['write_planet_samples']:
print(' Saving the planet variable samplings to files (with plots)')
samples_dir = dir_output + '/planet_samples/'
os.system('mkdir -p ' + samples_dir)
for common_ref, variable_values in planet_variables.items():
for variable_name, variable in variable_values.items():
rad_filename = samples_dir + common_ref + '_' + variable_name
fileout = open(rad_filename + '.dat', 'w')
for val in variable:
fileout.write('{0:f} \n'.format(val))
fileout.close()
fig = plt.figure(figsize=(10, 10))
plt.hist(variable, bins=50, color='C0', alpha=0.75, zorder=0)
perc0, perc1, perc2 = np.percentile(variable, [15.865, 50, 84.135], axis=0)
plt.axvline(planet_variables_med[common_ref][variable_name], color='C1', zorder=1,
label='Median-corresponding value')
plt.axvline(planet_variables_MAP[common_ref][variable_name], color='C2', zorder=1,
label='MAP-corresponding value')
plt.axvline(perc1, color='C3', zorder=2, label='Median of the distribution')
plt.axvline(perc0, color='C4', zorder=2, label='15.865th and 84.135th percentile')
plt.axvline(perc2, color='C4', zorder=2)
plt.xlabel(re.sub('_', '-', variable_name + '_' + common_ref))
plt.legend()
plt.ticklabel_format(useOffset=False)
plt.savefig(rad_filename + '.png', bbox_inches='tight', dpi=300)
plt.close(fig)
print()
print('****************************************************************************************************')
print()
if plot_dictionary['plot_models'] or plot_dictionary['write_models']:
print(' Computing the models for plot/data writing ')
bjd_plot = {
'full': {
'start': None, 'end': None, 'range': None
}
}
kinds = {}
P_minimum = 2.0 # this temporal range will be divided into 20 subsets
for key_name, key_val in planet_variables_med.items():
P_minimum = min(key_val.get('P', 2.0), P_minimum)
for dataset_name, dataset in mc.dataset_dict.items():
if dataset.kind in kinds.keys():
kinds[dataset.kind].extend([dataset_name])
else:
kinds[dataset.kind] = [dataset_name]
bjd_plot[dataset_name] = {
'start': np.amin(dataset.x),
'end': np.amax(dataset.x),
'range': np.amax(dataset.x) - np.amin(dataset.x),
}
if bjd_plot[dataset_name]['range'] < 0.1: bjd_plot[dataset_name]['range'] = 0.1
bjd_plot[dataset_name]['start'] -= bjd_plot[dataset_name]['range'] * 0.10
bjd_plot[dataset_name]['end'] += bjd_plot[dataset_name]['range'] * 0.10
if dataset.kind == 'Phot':
step_size = np.min(bjd_plot[dataset_name]['range'] / dataset.n / 10.)
else:
step_size = P_minimum / 20.
bjd_plot[dataset_name]['x_plot'] = \
np.arange(bjd_plot[dataset_name]['start'], bjd_plot[dataset_name]['end'], step_size)
if bjd_plot['full']['range']:
bjd_plot['full']['start'] = min(bjd_plot['full']['start'], np.amin(dataset.x))
bjd_plot['full']['end'] = max(bjd_plot['full']['end'], np.amax(dataset.x))
bjd_plot['full']['range'] = bjd_plot['full']['end'] - bjd_plot['full']['start']
else:
bjd_plot['full']['start'] = np.amin(dataset.x)
bjd_plot['full']['end'] = np.amax(dataset.x)
bjd_plot['full']['range'] = bjd_plot['full']['end'] - bjd_plot['full']['start']
bjd_plot['full']['start'] -= bjd_plot['full']['range'] * 0.10
bjd_plot['full']['end'] += bjd_plot['full']['range'] * 0.10
bjd_plot['full']['x_plot'] = np.arange(bjd_plot['full']['start'], bjd_plot['full']['end'], P_minimum / 20.)
for dataset_name, dataset in mc.dataset_dict.items():
if dataset.kind == 'RV':
bjd_plot[dataset_name] = bjd_plot['full']
bjd_plot['model_out'], bjd_plot['model_x'] = results_analysis.get_model(mc, chain_med[:, 0], bjd_plot)
bjd_plot['MAP_model_out'], bjd_plot['MAP_model_x'] = results_analysis.get_model(mc, chain_MAP, bjd_plot)
if plot_dictionary['plot_models']:
print(' Writing the plots ')
for kind_name, kind in kinds.items():
for dataset_name in kind:
try:
error_bars = np.sqrt(mc.dataset_dict[dataset_name].e**2
+ bjd_plot['model_out'][dataset_name]['jitter']**2)
except ValueError:
error_bars = mc.dataset_dict[dataset_name].e
fig = plt.figure(figsize=(12, 12))
# Partially taken from here:
# http://www.sc.eso.org/~bdias/pycoffee/codes/20160407/gridspec_demo.html
gs = gridspec.GridSpec(2, 1, height_ratios=[3.0, 1.0])
# Also make sure the margins and spacing are appropriate
gs.update(left=0.3, right=0.95, bottom=0.08, top=0.93, wspace=0.15, hspace=0.05)
ax_0 = plt.subplot(gs[0])
ax_1 = plt.subplot(gs[1], sharex=ax_0)
# Adding minor ticks only to x axis
minorLocator = AutoMinorLocator()
ax_0.xaxis.set_minor_locator(minorLocator)
ax_1.xaxis.set_minor_locator(minorLocator)
# Disabling the offset on top of the plot
ax_0.ticklabel_format(useOffset=False)
ax_1.ticklabel_format(useOffset=False)
ax_0.scatter(mc.dataset_dict[dataset_name].x,
mc.dataset_dict[dataset_name].y
- bjd_plot['model_out'][dataset_name]['systematics']
- bjd_plot['model_out'][dataset_name]['time_independent'],
color='C0', zorder=4, s=16)
ax_0.errorbar(mc.dataset_dict[dataset_name].x,
mc.dataset_dict[dataset_name].y
- bjd_plot['model_out'][dataset_name]['systematics']
- bjd_plot['model_out'][dataset_name]['time_independent'],
yerr=error_bars,
color='C0', fmt='o', ms=0, zorder=3, alpha=0.5)
ax_0.plot(bjd_plot[dataset_name]['x_plot'], bjd_plot['model_x'][dataset_name]['complete'],
label='Median-corresponding model',
color='C1', zorder=2)
ax_0.plot(bjd_plot[dataset_name]['x_plot'], bjd_plot['MAP_model_x'][dataset_name]['complete'],
label='MAP-corresponding model',
color='C2', zorder=1)
ax_0.set_ylabel('Same as input data')
ax_0.legend()
ax_1.scatter(mc.dataset_dict[dataset_name].x,
mc.dataset_dict[dataset_name].y - bjd_plot['model_out'][dataset_name]['complete'],
color='C0', zorder=4, s=16)
ax_1.errorbar(mc.dataset_dict[dataset_name].x,
mc.dataset_dict[dataset_name].y - bjd_plot['model_out'][dataset_name]['complete'],
yerr=error_bars,
color='C0', fmt='o', ms=0, zorder=3, alpha=0.5)
ax_1.axhline(0.0, color='k', alpha=0.5, zorder=0)
ax_1.set_xlabel('Time [d] (offset as the input data)')
ax_1.set_ylabel('Residuals (wrt median model)')
plt.savefig(dir_output + 'model_' + kind_name + '_' + dataset_name + '.png', bbox_inches='tight',
dpi=300)
plt.close(fig)
if plot_dictionary['write_models']:
for prepend_keyword in ['', 'MAP_']:
print(' Writing the ', prepend_keyword, 'data files ')
plot_out_keyword = prepend_keyword + 'model_out'
plot_x_keyword = prepend_keyword + 'model_x'
file_keyword = prepend_keyword + 'model_files'
if prepend_keyword == '':
planet_vars = planet_variables_med
# star_vars = star_variables # leaving here, it could be useful for the future
chain_ref = chain_med[:, 0]
elif prepend_keyword == 'MAP_':
planet_vars = planet_variables_MAP
# star_vars = star_variables_MAP
chain_ref = chain_MAP
dir_models = dir_output + file_keyword + '/'
os.system('mkdir -p ' + dir_models)
for dataset_name, dataset in mc.dataset_dict.items():
for model_name in dataset.models:
if getattr(mc.models[model_name], 'systematic_model', False):
continue
fileout = open(dir_models + dataset_name + '_' + model_name + '.dat', 'w')
phase = np.zeros(dataset.n)
tc_folded = np.zeros(dataset.n)
phase_plot = np.zeros(np.size(bjd_plot[dataset_name]['x_plot']))
tc_folded_plot = np.zeros(np.size(bjd_plot[dataset_name]['x_plot']))
for common_ref in mc.models[model_name].common_ref:
if common_ref in planet_vars:
if 'P' in planet_vars[common_ref]:
phase = (dataset.x0 / planet_vars[common_ref]['P']) % 1
phase_plot = ((bjd_plot[dataset_name]['x_plot'] - mc.Tref) /
planet_vars[common_ref]['P']) % 1
if 'Tc' in planet_vars[common_ref]:
tc_folded = (dataset.x - planet_vars[common_ref]['Tc']
+ planet_vars[common_ref]['P'] / 2.) \
% planet_vars[common_ref]['P'] \
- planet_vars[common_ref]['P'] / 2.
tc_folded_plot = (bjd_plot[dataset_name]['x_plot'] - planet_vars[common_ref][
'Tc']
+ planet_vars[common_ref]['P'] / 2.) \
% planet_vars[common_ref]['P'] \
- planet_vars[common_ref]['P'] / 2.
else:
tc_folded = dataset.x0 % planet_vars[common_ref]['P']
tc_folded_plot = (bjd_plot[dataset_name]['x_plot'] - mc.Tref) % \
planet_vars[common_ref]['P']
fileout.write('descriptor BJD Tc_folded pha val,+- sys mod full val_compare,+- res,+- jit \n')
try:
len(bjd_plot[plot_out_keyword][dataset_name][model_name])
except:
bjd_plot[plot_out_keyword][dataset_name][model_name] = \
bjd_plot[plot_out_keyword][dataset_name][model_name] * np.ones(dataset.n)
bjd_plot[plot_x_keyword][dataset_name][model_name] = \
bjd_plot[plot_x_keyword][dataset_name][model_name] * np.ones(dataset.n)
for x, tcf, pha, y, e, sys, mod, com, obs_mod, res, jit in zip(
dataset.x, tc_folded, phase, dataset.y, dataset.e,
bjd_plot[plot_out_keyword][dataset_name]['systematics'],
bjd_plot[plot_out_keyword][dataset_name][model_name],
bjd_plot[plot_out_keyword][dataset_name]['complete'],
dataset.y - bjd_plot[plot_out_keyword][dataset_name]['complete'] +
bjd_plot[plot_out_keyword][dataset_name][model_name],
dataset.y - bjd_plot[plot_out_keyword][dataset_name]['complete'],
bjd_plot[plot_out_keyword][dataset_name]['jitter']):
fileout.write('{0:f} {1:f} {2:f} {3:f} {4:f} {5:f} {6:1f} {7:f} {8:f} {9:f} {10:f} {11:f} {12:f}'
'\n'.format(x, tcf, pha, y, e, sys, mod, com, obs_mod, e, res, e, jit))
fileout.close()
if getattr(mc.models[model_name], 'systematic_model', False):
continue
if getattr(mc.models[model_name], 'jitter_model', False):
continue
fileout = open(dir_models + dataset_name + '_' + model_name + '_full.dat', 'w')
if model_name + '_std' in bjd_plot[plot_x_keyword][dataset_name]:
fileout.write('descriptor BJD Tc_folded phase mod,+- \n')
for x, tcf, pha, mod, std in zip(
bjd_plot[dataset_name]['x_plot'],
tc_folded_plot,
phase_plot,
bjd_plot[plot_x_keyword][dataset_name][model_name],
bjd_plot[plot_x_keyword][dataset_name][model_name + '_std']):
fileout.write('{0:f} {1:f} {2:f} {3:f} {4:f} \n'.format(x, tcf, pha, mod, std))
fileout.close()
else:
fileout.write('descriptor BJD Tc_folded phase mod \n')
for x, tcf, pha, mod in zip(bjd_plot[dataset_name]['x_plot'],
tc_folded_plot,
phase_plot,
bjd_plot[plot_x_keyword][dataset_name][model_name]):
fileout.write('{0:f} {1:f} {2:f} {3:f}\n'.format(x, tcf, pha, mod))
fileout.close()
if getattr(mc.models[model_name], 'model_class', False) == 'transit':
"""
Exceptional model writing to deal with under-sampled lightcurves, i.e. when folding the
the light curve from the model file is not good enough. Something similar is performed later
with the planetary RVs, but here we must keep into account the differences between datasets
due to limb darkening, exposure times, etc.
"""
variable_values = {}
for common_ref in mc.models[model_name].common_ref:
variable_values.update(mc.common_models[common_ref].convert(chain_ref))
variable_values.update(mc.models[model_name].convert(chain_ref, dataset_name))
fileout = open(dir_models + dataset_name + '_' + model_name + '_transit.dat', 'w')
x_range = np.arange(-variable_values['P']/2., variable_values['P']/2., 0.01)
import numpy as np
def ApplyWindowFunction(t,v,WindowFunction=None,Param=None):
'''
Apply a window function to a time series.
Inputs
======
t : float
Time array
v : float
Time series data to be windowed
WindowFunction : None | str
If None - no window is applied, otherwise the string names the
window function to be applied (see below for list of functions)
Param : float
Sometimes a window function may be modified by some parameter,
setting this keyword to None will force the routine to use a
default value where needed.
Returns
=======
vw : float
Time series data, v, with the appropriate window function
applied to it.
Window Functions
================
Function | Param
--------------------|-------------
None | N/A
'cosine-bell' | float (percentage)
'hamming' | float (percentage)
'triangle' | float (percentage)
'welch' | float (percentage)
'blackman' | float (percentage)
'nuttall' | float (percentage)
'blackman-nuttall' | float (percentage)
'flat-top' | float (percentage)
'cosine' | float (percentage)
'gaussian' | tuple: (float (width),float (percentage))
'''
WF = { 'none': (_WFNone,0.0),
'cosine-bell': (_WFCosineBell,10.0),
'hamming': (_WFHamming,50.0),
'hann': (_WFHann,50.0),
'triangle': (_WFTriangle,50.0),
'welch': (_WFWelch,50.0),
'blackman': (_WFBlackman,50.0),
'nuttall': (_WFNuttall,50.0),
'blackman-nuttall': (_WFBlackmanNuttall,50.0),
'flat-top': (_WFFlatTop,50.0),
'cosine': (_WFCosine,10.0),
'gaussian': (_WFGaussian,(0.4,50.0))}
# get the appropriate window function and parameters
Func,Pdef = WF.get(WindowFunction,(_WFNone,0.0))
# check if any custom parameters are being used
if Param is None:
P = Pdef
else:
P = Param
#apply to data
return Func(t,v,P)
def WindowScaleFactor(WindowFunction=None,Param=None):
'''
Work out the scaling factor for the amplitude due to the choice of
window function.
'''
SF = { 'none': (_SFNone,0.0),
'cosine-bell': (_SFCosineBell,10.0),
'hamming': (_SFHamming,50.0),
'hann': (_SFHann,50.0),
'triangle': (_SFTriangle,50.0),
'welch': (_SFWelch,50.0),
'blackman': (_SFBlackman,50.0),
'nuttall': (_SFNuttall,50.0),
'blackman-nuttall': (_SFBlackmanNuttall,50.0),
'flat-top': (_SFFlatTop,50.0),
'cosine': (_SFCosine,10.0),
'gaussian': (_SFGaussian,(0.4,50.0))}
# get the appropriate window function and parameters
Func,Pdef = SF.get(WindowFunction,(_SFNone,0.0))
# check if any custom parameters are being used
if Param is None:
P = Pdef
else:
P = Param
return Func(P)
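# Minimal usage sketch (not part of the original module): taper a toy series
# and undo the amplitude loss with the matching scale factor.  It assumes that
# all window/scale-factor helpers referenced in the lookup tables above are
# defined as in the full module.
def _demo_window_usage():
    t = np.linspace(0.0, 10.0, 1000)
    v = np.sin(2.0 * np.pi * 0.5 * t)
    vw = ApplyWindowFunction(t, v, WindowFunction='hamming')
    sf = WindowScaleFactor(WindowFunction='hamming')
    return vw / sf  # amplitude-corrected, windowed series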
def _WFNone(t,v,P):
'''
No window function - just return original array.
'''
return v
def _SFNone(P):
'''
Scaling factor of the uniform window.
'''
return 1.0
def _WFCosineBell(t,v,P=10.0):
'''
This will multiply the date by the Split cosine bell function.
Inputs
======
t : float
Time array
v : float
Time series to be windowed
P : float
Percentage of time series at each end to be part of the cosine.
The remaining 100 - 2*P % is left unchanged (if you set to 50.0,
then the whole window has the function applied to it).
Returns
=======
out : float
Windowed version of v
'''
#get the time range
t0 = np.nanmin(t)
t1 = np.nanmax(t)
tr = t1 - t0
#get a scaled time array
ts = 100.0*(t - t0)/tr
#work out the indices for each section
i0 = np.where(ts < P)[0]
i2 = np.where(ts > (100 - P))[0]
#calculate the window function
w = np.ones(t.size,dtype=v.dtype)
#first section
w[i0] = 0.5 + 0.5*np.cos((ts[i0]/P + 1.0)*np.pi)
#last section
w[i2] = 0.5 + 0.5*np.cos(np.pi*(ts[i2] - (100 - P))/P)
#multiply by v
out = v*w
return out
def _SFCosineBell(P):
'''
Scaling factor of the cosine-bell function.
'''
#work out the proportions of the window to which the function is
#applied
#untouched portion
p0 = 0.01*(100 - 2*P)
#touched portion
p1 = 1.0 - p0
#this is the integral of the window function if it were to apply
#to the entire window of data
Iwind = 0.5
#calculate the overall factor
sf = p0 + Iwind*p1
return sf
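# Worked example: with the default P = 10 the untouched fraction is
# p0 = 0.01 * (100 - 20) = 0.8, the tapered fraction is p1 = 0.2 and, since
# the mean of 0.5 + 0.5*cos(.) over a full period is 0.5, the resulting
# factor is sf = 0.8 + 0.5 * 0.2 = 0.9.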
def _WFHamming(t,v,P=50.0):
'''
This will multiply the date by the Hamming window function.
Inputs
======
t : float
Time array
v : float
Time series to be windowed
P : float
Percentage of window to have the function applied at each end
(if you set to 50.0, then the whole window has the function
applied to it).
Returns
=======
out : float
Windowed version of v
'''
#get the time range
t0 = np.nanmin(t)
t1 = np.nanmax(t)
tr = t1 - t0
#get a scaled time array
ts = 100.0*(t - t0)/tr
#work out the indices for each section
i0 = np.where(ts < P)[0]
i2 = np.where(ts > (100 - P))[0]
#calculate the window function
w = np.ones(t.size,dtype=v.dtype)
#first section
w[i0] = 0.53836 - 0.46164*np.cos(np.pi*ts[i0]/P)
#last section
w[i2] = 0.53836 - 0.46164*np.cos(np.pi*(1.0 + (ts[i2] - (100 - P))/P))
#multiply by v
out = v*w
return out
def _SFHamming(P):
'''
Scaling factor of the Hamming function.
'''
#work out the proportions of the window to which the function is
#applied
#untouched portion
p0 = 0.01*(100 - 2*P)
#touched portion
p1 = 1.0 - p0
#this is the integral of the window function if it were to apply
#to the entire window of data
Iwind = 0.53836
#calculate the overall factor
sf = p0 + Iwind*p1
return sf
def _WFHann(t,v,P=50.0):
'''
This will multiply the date by the Hann window (sometimes
erroneously called the "Hanning" window). This is similar to the
Hamming window, but is touches zero at each end.
Inputs
======
t : float
Time array
v : float
Time series to be windowed
P : float
Percentage of window to have the function applied at each end
(if you set to 50.0, then the whole window has the function
applied to it).
Returns
=======
out : float
Windowed version of v
'''
#get the time range
t0 = np.nanmin(t)
t1 = np.nanmax(t)
# Copyright (C) 2021 <NAME>, <NAME>, and Politecnico di Milano. All rights reserved.
# Licensed under the Apache 2.0 License.
import sys
sys.path = ['.'] + sys.path
import numpy as np
import scipy.stats as stats
# import open bandit pipeline (obp)
from obp.ope.utils import find_optimal_lambda, estimate_lambda
from prettytable import PrettyTable
from scipy.stats import t
from scipy.optimize import minimize
SIGMA2_B = [1]
SIGMA2_E = [1.5, 1.9, 1.99, 1.999]
N = [10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000, 20000, 50000, 100000]
N = [10, 20, 50, 100, 200, 500, 1000]
N_ref = 10000000 #samples to compute all things
mu_b, mu_e = 0., 0.5
n_runs = 60
significance = 0.1
def f(x):
return 100*np.cos(2*np.pi*x)
def generate_dataset(n, mu, sigma2):
generated_samples = stats.norm.rvs(size=n, loc=mu, scale=np.sqrt(sigma2))
return generated_samples, f(generated_samples)
def compute_renyi_divergence(mu_b, sigma2_b, mu_e, sigma2_e, alpha=2):
var_star = alpha * sigma2_b + (1 - alpha) * sigma2_e
contextual_non_exp_Renyi = np.log(sigma2_b ** .5 / sigma2_e ** .5) + 1 / (2 * (alpha - 1)) * np.log(
sigma2_b / var_star) + alpha * (mu_e - mu_b) ** 2 / (2 * var_star)
non_exp_Renyi = np.mean(contextual_non_exp_Renyi)
exp_Renyi = np.exp(non_exp_Renyi)
return exp_Renyi
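# Quick usage sketch (added for illustration, not in the original script):
# evaluate the exponentiated 2-Renyi divergence over the (sigma2_b, sigma2_e)
# grid defined above.  With alpha = 2 the reference variance is
# var_star = 2*sigma2_b - sigma2_e, so the divergence blows up as
# sigma2_e -> 2*sigma2_b, which is exactly the regime probed by SIGMA2_E.
def _renyi_grid():
    return {
        (s2b, s2e): compute_renyi_divergence(mu_b, s2b, mu_e, s2e, alpha=2)
        for s2b in SIGMA2_B for s2e in SIGMA2_E
    }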
def welch_test(res):
res_mean = np.mean(res, axis=0)
res_std = np.var(res, axis=0)
import numpy as np
import theano.tensor as tt
import pymc3 as pm
import starry
from starry._plotting import (
get_moll_latitude_lines,
get_moll_longitude_lines,
)
from matplotlib import pyplot as plt
from matplotlib import colors
from scipy import optimize
np.random.seed(42)
starry.config.lazy = True
ydeg = 20
map = starry.Map(ydeg)
lat, lon, Y2P, P2Y, Dx, Dy = map.get_pixel_transforms(oversample=4)
npix = Y2P.shape[0]
std_p = 1.62
with pm.Model() as model:
p = pm.Exponential("p", 1 / std_p, shape=(npix,))
x = tt.dot(P2Y, p)
pm.Deterministic("x", x)
p_back = tt.dot(Y2P, x)
pm.Deterministic("p_back", p_back)
trace_pp = pm.sample_prior_predictive(10)
# Convert lat, lon to x,y coordinates in Mollewiede projection
def lon_lat_to_mollweide(lon, lat):
lat *= np.pi / 180
lon *= np.pi / 180
f = lambda x: 2 * x + np.sin(2 * x) - np.pi * np.sin(lat)
theta = optimize.newton(f, 0.3)
x = 2 * np.sqrt(2) / np.pi * lon * np.cos(theta)
y = np.sqrt(2) * np.sin(theta)
return x, y
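# Sanity-check sketch (added for illustration, not in the original script):
# the Mollweide projection should send (lon, lat) = (0, 0) to (0, 0) and an
# equatorial point at lon = 180 deg to x = 2*sqrt(2), y = 0.
def _check_mollweide_edges(tol=1e-6):
    x0, y0 = lon_lat_to_mollweide(0.0, 0.0)
    assert abs(x0) < tol and abs(y0) < tol
    x1, y1 = lon_lat_to_mollweide(180.0, 0.0)
    assert abs(x1 - 2.0 * np.sqrt(2.0)) < tol and abs(y1) < tol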
x_mol = np.zeros(npix)
y_mol = np.zeros(npix)
for idx, (lo, la) in enumerate(zip(lon, lat)):
x_, y_ = lon_lat_to_mollweide(lo, la)
x_mol[idx] = x_
y_mol[idx] = y_
def plot_grid_lines(ax, alpha=0.6):
"""
Code from https://github.com/rodluger/starry/blob/0546b4e445f6570b9a1cf6e33068e01a96ecf20f/starry/maps.py.
"""
ax.axis("off")
borders = []
x = np.linspace(-2 * np.sqrt(2), 2 * np.sqrt(2), 10000)
y = np.sqrt(2) * np.sqrt(1 - (x / (2 * np.sqrt(2))) ** 2)
borders += [ax.fill_between(x, 1.1 * y, y, color="w", zorder=-1)]
borders += [
ax.fill_betweenx(0.5 * x, 2.2 * y, 2 * y, color="w", zorder=-1)
]
borders += [ax.fill_between(x, -1.1 * y, -y, color="w", zorder=-1)]
borders += [
ax.fill_betweenx(0.5 * x, -2.2 * y, -2 * y, color="w", zorder=-1)
]
x = np.linspace(-2 * np.sqrt(2), 2 * np.sqrt(2), 10000)
import warnings
import numpy as np
import numpy.testing as npt
from dipy.data import get_fnames
from dipy.core.gradients import (gradient_table, GradientTable,
gradient_table_from_bvals_bvecs,
gradient_table_from_qvals_bvecs,
gradient_table_from_gradient_strength_bvecs,
WATER_GYROMAGNETIC_RATIO,
reorient_bvecs, generate_bvecs,
check_multi_b)
from dipy.io.gradients import read_bvals_bvecs
def test_btable_prepare():
sq2 = np.sqrt(2) / 2.
bvals = 1500 * np.ones(7)
bvals[0] = 0
bvecs = np.array([[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[sq2, sq2, 0],
[sq2, 0, sq2],
[0, sq2, sq2]])
bt = gradient_table(bvals, bvecs)
npt.assert_array_equal(bt.bvecs, bvecs)
# bt.info
fimg, fbvals, fbvecs = get_fnames('small_64D')
bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
bvecs = np.where(np.isnan(bvecs), 0, bvecs)
bt = gradient_table(bvals, bvecs)
npt.assert_array_equal(bt.bvecs, bvecs)
bt2 = gradient_table(bvals, bvecs.T)
npt.assert_array_equal(bt2.bvecs, bvecs)
btab = np.concatenate((bvals[:, None], bvecs), axis=1)
bt3 = gradient_table(btab)
npt.assert_array_equal(bt3.bvecs, bvecs)
npt.assert_array_equal(bt3.bvals, bvals)
bt4 = gradient_table(btab.T)
npt.assert_array_equal(bt4.bvecs, bvecs)
npt.assert_array_equal(bt4.bvals, bvals)
# Test for proper inputs (expects either bvals/bvecs or 4 by n):
npt.assert_raises(ValueError, gradient_table, bvecs)
def test_GradientTable():
gradients = np.array([[0, 0, 0],
[1, 0, 0],
[0, 0, 1],
[3, 4, 0],
[5, 0, 12]], 'float')
expected_bvals = np.array([0, 1, 1, 5, 13])
expected_b0s_mask = expected_bvals == 0
expected_bvecs = gradients / (expected_bvals + expected_b0s_mask)[:, None]
gt = GradientTable(gradients, b0_threshold=0)
npt.assert_array_almost_equal(gt.bvals, expected_bvals)
npt.assert_array_equal(gt.b0s_mask, expected_b0s_mask)
npt.assert_array_almost_equal(gt.bvecs, expected_bvecs)
npt.assert_array_almost_equal(gt.gradients, gradients)
gt = GradientTable(gradients, b0_threshold=1)
npt.assert_array_equal(gt.b0s_mask, [1, 1, 1, 0, 0])
npt.assert_array_equal(gt.bvals, expected_bvals)
npt.assert_array_equal(gt.bvecs, expected_bvecs)
# checks negative values in gtab
npt.assert_raises(ValueError, GradientTable, -1)
npt.assert_raises(ValueError, GradientTable, np.ones((6, 2)))
npt.assert_raises(ValueError, GradientTable, np.ones((6,)))
with warnings.catch_warnings(record=True) as w:
bad_gt = gradient_table(expected_bvals, expected_bvecs,
b0_threshold=200)
assert len(w) == 1
def test_gradient_table_from_qvals_bvecs():
qvals = 30. * np.ones(7)
big_delta = .03 # pulse separation of 30ms
small_delta = 0.01 # pulse duration of 10ms
qvals[0] = 0
sq2 = np.sqrt(2) / 2
bvecs = np.array([[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[sq2, sq2, 0],
[sq2, 0, sq2],
[0, sq2, sq2]])
gt = gradient_table_from_qvals_bvecs(qvals, bvecs,
big_delta, small_delta)
bvals_expected = (qvals * 2 * np.pi) ** 2 * (big_delta - small_delta / 3.)
gradient_strength_expected = qvals * 2 * np.pi /\
(small_delta * WATER_GYROMAGNETIC_RATIO)
npt.assert_almost_equal(gt.gradient_strength, gradient_strength_expected)
npt.assert_almost_equal(gt.bvals, bvals_expected)
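# Worked number for the values above (assuming SI-consistent units): with
# q = 30, big_delta = 0.03 and small_delta = 0.01,
#     b = (2*pi*q)**2 * (big_delta - small_delta/3)
#       = (60*pi)**2 * 0.02667 ~ 947.5,
# which is what bvals_expected evaluates to.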
def test_gradient_table_from_gradient_strength_bvecs():
gradient_strength = .03e-3 * np.ones(7) # clinical strength at 30 mT/m
big_delta = .03 # pulse separation of 30ms
small_delta = 0.01 # pulse duration of 10ms
gradient_strength[0] = 0
sq2 = np.sqrt(2) / 2
bvecs = np.array([[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[sq2, sq2, 0],
[sq2, 0, sq2],
[0, sq2, sq2]])
gt = gradient_table_from_gradient_strength_bvecs(gradient_strength, bvecs,
big_delta, small_delta)
qvals_expected = (gradient_strength * WATER_GYROMAGNETIC_RATIO *
small_delta / (2 * np.pi))
bvals_expected = (qvals_expected * 2 * np.pi) ** 2 *\
(big_delta - small_delta / 3.)
npt.assert_almost_equal(gt.qvals, qvals_expected)
npt.assert_almost_equal(gt.bvals, bvals_expected)
def test_gradient_table_from_bvals_bvecs():
sq2 = np.sqrt(2) / 2
bvals = [0, 1, 2, 3, 4, 5, 6, 0]
bvecs = np.array([[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[sq2, sq2, 0],
[sq2, 0, sq2],
[0, sq2, sq2],
[0, 0, 0]])
gt = gradient_table_from_bvals_bvecs(bvals, bvecs, b0_threshold=0)
npt.assert_array_equal(gt.bvecs, bvecs)
npt.assert_array_equal(gt.bvals, bvals)
npt.assert_array_equal(gt.gradients, np.reshape(bvals, (-1, 1)) * bvecs)
npt.assert_array_equal(gt.b0s_mask, [1, 0, 0, 0, 0, 0, 0, 1])
# Test nans are replaced by 0
new_bvecs = bvecs.copy()
new_bvecs[[0, -1]] = np.nan
gt = gradient_table_from_bvals_bvecs(bvals, new_bvecs, b0_threshold=0)
npt.assert_array_equal(gt.bvecs, bvecs)
# Bvalue > 0 for non-unit vector
bad_bvals = [2, 1, 2, 3, 4, 5, 6, 0]
npt.assert_raises(ValueError, gradient_table_from_bvals_bvecs, bad_bvals,
bvecs, b0_threshold=0.)
# num_gard inconsistent bvals, bvecs
bad_bvals = np.ones(7)
npt.assert_raises(ValueError, gradient_table_from_bvals_bvecs, bad_bvals,
bvecs, b0_threshold=0.)
# negative bvals
bad_bvals = [-1, -1, -1, -5, -6, -10]
npt.assert_raises(ValueError, gradient_table_from_bvals_bvecs, bad_bvals,
bvecs, b0_threshold=0.)
# bvals not 1d
bad_bvals = np.ones((1, 8))
npt.assert_raises(ValueError, gradient_table_from_bvals_bvecs, bad_bvals,
bvecs, b0_threshold=0.)
# bvec not 2d
bad_bvecs = np.ones((1, 8, 3))
npt.assert_raises(ValueError, gradient_table_from_bvals_bvecs, bvals,
bad_bvecs, b0_threshold=0.)
# bvec not (N, 3)
bad_bvecs = np.ones((8, 2))
npt.assert_raises(ValueError, gradient_table_from_bvals_bvecs, bvals,
bad_bvecs, b0_threshold=0.)
# bvecs not unit vectors
bad_bvecs = bvecs * 2
npt.assert_raises(ValueError, gradient_table_from_bvals_bvecs, bvals,
bad_bvecs, b0_threshold=0.)
# Test **kargs get passed along
gt = gradient_table_from_bvals_bvecs(bvals, bvecs, b0_threshold=0,
big_delta=5, small_delta=2)
npt.assert_equal(gt.big_delta, 5)
npt.assert_equal(gt.small_delta, 2)
def test_b0s():
sq2 = np.sqrt(2) / 2.
bvals = 1500 * np.ones(8)
bvals[0] = 0
bvals[7] = 0
bvecs = np.array([[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[sq2, sq2, 0],
[sq2, 0, sq2],
[0, sq2, sq2],
[0, 0, 0]])
bt = gradient_table(bvals, bvecs)
npt.assert_array_equal(np.where(bt.b0s_mask > 0)[0], np.array([0, 7]))
npt.assert_array_equal(np.where(bt.b0s_mask == 0)[0], np.arange(1, 7))
def test_gtable_from_files():
fimg, fbvals, fbvecs = get_fnames('small_101D')
gt = gradient_table(fbvals, fbvecs)
bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
npt.assert_array_equal(gt.bvals, bvals)
npt.assert_array_equal(gt.bvecs, bvecs)
def test_deltas():
sq2 = np.sqrt(2) / 2.
bvals = 1500 * np.ones(7)
bvals[0] = 0
bvecs = np.array([[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[sq2, sq2, 0],
[sq2, 0, sq2],
[0, sq2, sq2]])
bt = gradient_table(bvals, bvecs, big_delta=5, small_delta=2)
npt.assert_equal(bt.big_delta, 5)
npt.assert_equal(bt.small_delta, 2)
def test_qvalues():
sq2 = np.sqrt(2) / 2.
bvals = 1500 * np.ones(7)
bvals[0] = 0
bvecs = np.array([[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[sq2, sq2, 0],
[sq2, 0, sq2],
[0, sq2, sq2]])
qvals = np.sqrt(bvals / 6) / (2 * np.pi)
bt = gradient_table(bvals, bvecs, big_delta=8, small_delta=6)
npt.assert_almost_equal(bt.qvals, qvals)
def test_reorient_bvecs():
sq2 = np.sqrt(2) / 2
bvals = np.concatenate([[0], np.ones(6) * 1000])
bvecs = np.array([[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[sq2, sq2, 0],
[sq2, 0, sq2],
[0, sq2, sq2]])
gt = gradient_table_from_bvals_bvecs(bvals, bvecs, b0_threshold=0)
# The simple case: all affines are identity
affs = np.zeros((6, 4, 4))
for i in range(4):
affs[:, i, i] = 1
# We should get back the same b-vectors
new_gt = reorient_bvecs(gt, affs)
npt.assert_equal(gt.bvecs, new_gt.bvecs)
# Now apply some rotations
rotation_affines = []
rotated_bvecs = bvecs[:]
for i in | np.where(~gt.b0s_mask) | numpy.where |
"""
Test Surrogates Overview
========================
"""
# Author: <NAME> <<EMAIL>>
# License: new BSD
from PIL import Image
import numpy as np
import scripts.surrogates_overview as exo
import scripts.image_classifier as imgclf
import sklearn.datasets
import sklearn.linear_model
SAMPLES = 10
BATCH = 50
SAMPLE_IRIS = False
IRIS_SAMPLES = 50000
def test_bilmey_image():
"""Tests surrogate image bLIMEy."""
# Load the image
doggo_img = Image.open('surrogates_overview/img/doggo.jpg')
doggo_array = np.array(doggo_img)
# Load the classifier
clf = imgclf.ImageClassifier()
explain_classes = [('tennis ball', 852),
('golden retriever', 207),
('Labrador retriever', 208)]
    # Configure the occlusion colours, segmentation granularities and explained
    # classes to iterate over
colour_selection = {
i: i for i in ['mean', 'black', 'white', 'randomise-patch', 'green']
}
granularity_selection = {'low': 13, 'medium': 30, 'high': 50}
# Generate explanations
blimey_image_collection = {}
for gran_name, gran_number in granularity_selection.items():
blimey_image_collection[gran_name] = {}
for col_name in colour_selection:
blimey_image_collection[gran_name][col_name] = \
exo.build_image_blimey(
doggo_array,
clf.predict_proba,
explain_classes,
explanation_size=5,
segments_number=gran_number,
occlusion_colour=col_name,
samples_number=SAMPLES,
batch_size=BATCH,
random_seed=42)
exp = []
for gran_ in blimey_image_collection:
for col_ in blimey_image_collection[gran_]:
exp.append(blimey_image_collection[gran_][col_]['surrogates'])
assert len(exp) == len(EXP_IMG)
for e, E in zip(exp, EXP_IMG):
assert sorted(list(e.keys())) == sorted(list(E.keys()))
for key in e.keys():
assert e[key]['name'] == E[key]['name']
assert len(e[key]['explanation']) == len(E[key]['explanation'])
for e_, E_ in zip(e[key]['explanation'], E[key]['explanation']):
assert e_[0] == E_[0]
assert np.allclose(e_[1], E_[1], atol=.001, equal_nan=True)
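# Editor's note on the expected data used above: EXP_IMG (defined further below) is
# a list with fifteen entries, one per (granularity, occlusion colour) combination in
# the same iteration order as the loops above; each entry maps an ImageNet class id
# (852, 207, 208) to its class name and the top five (segment index, weight) pairs
# of the surrogate explanation.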
def test_bilmey_tabular():
"""Tests surrogate tabular bLIMEy."""
# Load the iris data set
iris = sklearn.datasets.load_iris()
iris_X = iris.data # [:, :2] # take the first two features only
iris_y = iris.target
iris_labels = iris.target_names
iris_feature_names = iris.feature_names
label2class = {lab: i for i, lab in enumerate(iris_labels)}
# Fit the classifier
logreg = sklearn.linear_model.LogisticRegression(C=1e5)
logreg.fit(iris_X, iris_y)
# explained class
_dtype = iris_X.dtype
explained_instances = {
'setosa': np.array([5, 3.5, 1.5, 0.25]).astype(_dtype),
'versicolor': np.array([5.5, 2.75, 4.5, 1.25]).astype(_dtype),
'virginica': np.array([7, 3, 5.5, 2.25]).astype(_dtype)
}
petal_length_idx = iris_feature_names.index('petal length (cm)')
petal_length_bins = [1, 2, 3, 4, 5, 6, 7]
petal_width_idx = iris_feature_names.index('petal width (cm)')
petal_width_bins = [0, .5, 1, 1.5, 2, 2.5]
discs_ = []
for i, ix in enumerate(petal_length_bins): # X-axis
for iix in petal_length_bins[i + 1:]:
for j, jy in enumerate(petal_width_bins): # Y-axis
for jjy in petal_width_bins[j + 1:]:
discs_.append({
petal_length_idx: [ix, iix],
petal_width_idx: [jy, jjy]
})
for inst_i in explained_instances:
for cls_i in iris_labels:
for disc_i, disc in enumerate(discs_):
inst = explained_instances[inst_i]
cls = label2class[cls_i]
exp = exo.build_tabular_blimey(
inst, cls, iris_X, iris_y, logreg.predict_proba, disc,
IRIS_SAMPLES, SAMPLE_IRIS, 42)
key = '{}&{}&{}'.format(inst_i, cls, disc_i)
exp_ = EXP_TAB[key]
assert exp['explanation'].shape[0] == exp_.shape[0]
assert np.allclose(
exp['explanation'], exp_, atol=.001, equal_nan=True)
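# Editor's note: the EXP_TAB keys below follow the '{instance}&{class index}&{discretisation index}'
# pattern built in the test above, e.g. 'setosa&0&0' is the 'setosa' instance explained
# for class 0 under the first petal length/width discretisation in discs_.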
EXP_IMG = [
{207: {'explanation': [(13, -0.24406872165780585),
(11, -0.20456180387430317),
(9, -0.1866779131424261),
(4, 0.15001224157793785),
(3, 0.11589480417160983)],
'name': 'golden retriever'},
208: {'explanation': [(13, -0.08395966359346249),
(0, -0.0644986107387837),
(9, 0.05845584633658977),
(1, 0.04369763085720947),
(11, -0.035958188394941866)],
'name': '<NAME>'},
852: {'explanation': [(13, 0.3463529698715463),
(11, 0.2678050131923326),
(4, -0.10639863421417416),
(6, 0.08345792378117327),
(9, 0.07366945242386444)],
'name': '<NAME>'}},
{207: {'explanation': [(13, -0.0624167912596456),
(7, 0.06083359545295548),
(3, 0.0495953943686462),
(11, -0.04819787147412231),
(2, -0.03858823761391199)],
'name': '<NAME>'},
208: {'explanation': [(13, -0.08408428146916162),
(7, 0.07704235920590158),
(3, 0.06646468388122273),
(11, -0.0638326572126609),
(2, -0.052621478002380796)],
'name': '<NAME>'},
852: {'explanation': [(11, 0.35248212611685886),
(13, 0.2516925608037859),
(2, 0.13682853028454384),
(9, 0.12930134856644754),
(6, 0.1257747954095489)],
'name': '<NAME>'}},
{207: {'explanation': [(3, 0.21351937934930917),
(10, 0.16933456312772083),
(11, -0.13447244552856766),
(8, 0.11058919217055371),
(2, -0.06269239798368743)],
'name': '<NAME>'},
208: {'explanation': [(8, 0.05995551486884414),
(9, -0.05375302972380482),
(11, -0.051997353324246445),
(6, 0.04213181405953071),
(2, -0.039169895361928275)],
'name': '<NAME>'},
852: {'explanation': [(7, 0.31382219776986503),
(11, 0.24126214884275987),
(13, 0.21075924370226598),
(2, 0.11937652039885377),
(8, -0.11911265319329697)],
'name': '<NAME>'}},
{207: {'explanation': [(3, 0.39254403293049134),
(9, 0.19357165018747347),
(6, 0.16592079671652987),
(0, 0.14042059731407297),
(1, 0.09793027079765507)],
'name': '<NAME>'},
208: {'explanation': [(9, -0.19351859273276703),
(1, -0.15262967987262344),
(3, 0.12205127112235375),
(2, 0.11352141032313934),
(6, -0.11164209893429898)],
'name': '<NAME>'},
852: {'explanation': [(7, 0.17213007100844877),
(0, -0.1583030948868859),
(3, -0.13748574615069775),
(5, 0.13273283867075436),
(11, 0.12309551170070354)],
'name': '<NAME>'}},
{207: {'explanation': [(3, 0.4073533182995105),
(10, 0.20711667988142463),
(8, 0.15360813290032324),
(6, 0.1405424759832785),
(1, 0.1332920685413575)],
'name': '<NAME>'},
208: {'explanation': [(9, -0.14747910525112617),
(1, -0.13977061235228924),
(2, 0.10526833898161611),
(6, -0.10416022118399552),
(3, 0.09555992655161764)],
'name': '<NAME>'},
852: {'explanation': [(11, 0.2232260929107954),
(7, 0.21638443149433054),
(5, 0.21100464215582274),
(13, 0.145614853795006),
(1, -0.11416523431311262)],
'name': '<NAME>'}},
{207: {'explanation': [(1, 0.14700178977744183),
(0, 0.10346667279328238),
(2, 0.10346667279328238),
(7, 0.10346667279328238),
(8, 0.10162900633690726)],
'name': '<NAME>'},
208: {'explanation': [(10, -0.10845134816658476),
(8, -0.1026920429226184),
(6, -0.10238154733842847),
(18, 0.10094164937411244),
(16, 0.08646888450232793)],
'name': '<NAME>'},
852: {'explanation': [(18, -0.20542297091894474),
(13, 0.2012751176130666),
(8, -0.19194747162742365),
(20, 0.14686930696710473),
(15, 0.11796990086271067)],
'name': '<NAME>'}},
{207: {'explanation': [(13, 0.12446259821701779),
(17, 0.11859084421095789),
(15, 0.09690553833007137),
(12, -0.08869743701731962),
(4, 0.08124900427893789)],
'name': '<NAME>'},
208: {'explanation': [(10, -0.09478194981909983),
(20, -0.09173392507039077),
(9, 0.08768898801254493),
(17, -0.07553994244536394),
(4, 0.07422905503397653)],
'name': '<NAME>'},
852: {'explanation': [(21, 0.1327882942965061),
(1, 0.1238236573086363),
(18, -0.10911712271717902),
(19, 0.09707191051320978),
(6, 0.08593672504338913)],
'name': '<NAME>'}},
{207: {'explanation': [(6, 0.14931728779865114),
(14, 0.14092073957103526),
(1, 0.11071480021464616),
(4, 0.10655287976934531),
(8, 0.08705404649152573)],
'name': '<NAME>'},
208: {'explanation': [(8, -0.12242580400886727),
(9, 0.12142729544158742),
(14, -0.1148252787068248),
(16, -0.09562322208795092),
(4, 0.09350160975513132)],
'name': '<NAME>'},
852: {'explanation': [(6, 0.04227675072263027),
(9, -0.03107924340879173),
(14, 0.028007115650713045),
(13, 0.02771190348545554),
(19, 0.02640441416071482)],
'name': '<NAME>'}},
{207: {'explanation': [(19, 0.14313680656283245),
(18, 0.12866508562342843),
(8, 0.11809779264185447),
(0, 0.11286255403442104),
(2, 0.11286255403442104)],
'name': '<NAME>'},
208: {'explanation': [(9, 0.2397917428082761),
(14, -0.19435572812170654),
(6, -0.1760894833446507),
(18, -0.12243333818399058),
(15, 0.10986343675377105)],
'name': '<NAME>'},
852: {'explanation': [(14, 0.15378038774613365),
(9, -0.14245940635481966),
(6, 0.10213601012183973),
(20, 0.1009180838986786),
(3, 0.09780065767815548)],
'name': '<NAME>'}},
{207: {'explanation': [(15, 0.06525850448807077),
(9, 0.06286791243851698),
(19, 0.055189970374185854),
(8, 0.05499197604401475),
(13, 0.04748220842936177)],
'name': '<NAME>'},
208: {'explanation': [(6, -0.31549091899770765),
(5, 0.1862302670824446),
(8, -0.17381478451341995),
(10, -0.17353516098662508),
(14, -0.13591542421754205)],
'name': '<NAME>'},
852: {'explanation': [(14, 0.2163853942943355),
(6, 0.17565046338282214),
(1, 0.12446193028474549),
(9, -0.11365789839746396),
(10, 0.09239073691962967)],
'name': '<NAME>'}},
{207: {'explanation': [(19, 0.1141207265647932),
(36, -0.08861425922625768),
(30, 0.07219209872026074),
(9, -0.07150939547859836),
(38, -0.06988288637544438)],
'name': '<NAME>'},
208: {'explanation': [(29, 0.10531073909547647),
(13, 0.08279642208039652),
(34, -0.0817952443980797),
(33, -0.08086848205765082),
(12, 0.08086848205765082)],
'name': '<NAME>'},
852: {'explanation': [(13, -0.1330452414595897),
(4, 0.09942366413042845),
(12, -0.09881995683190645),
(33, 0.09881995683190645),
(19, -0.09596925317560831)],
'name': '<NAME>'}},
{207: {'explanation': [(37, 0.08193926967758253),
(35, 0.06804043021426347),
(15, 0.06396269230810163),
(11, 0.062255657227065296),
(8, 0.05529200233091672)],
'name': '<NAME>'},
208: {'explanation': [(19, 0.05711957286614678),
(27, -0.050230108135410824),
(16, -0.04743034616549999),
(5, -0.046717346734255705),
(9, -0.04419100026638039)],
'name': '<NAME>'},
852: {'explanation': [(3, -0.08390967998497496),
(30, -0.07037680222442452),
(22, 0.07029819368543713),
(8, -0.06861396187180349),
(37, -0.06662511956402824)],
'name': '<NAME>'}},
{207: {'explanation': [(19, 0.048418845359024805),
(9, -0.0423869575883795),
(30, 0.04012650790044438),
(36, -0.03787242980067195),
(10, 0.036557999380695635)],
'name': '<NAME>'},
208: {'explanation': [(10, 0.12120686823129677),
(17, 0.10196564232230493),
(7, 0.09495133975425854),
(25, -0.0759657891182803),
(2, -0.07035244568286837)],
'name': '<NAME>'},
852: {'explanation': [(3, -0.0770578003457272),
(28, 0.0769372258280398),
(6, -0.06044725989272927),
(22, 0.05550155775286349),
(31, -0.05399028046597057)],
'name': '<NAME>'}},
{207: {'explanation': [(14, 0.05371383110181226),
(0, -0.04442539316084218),
(18, 0.042589475382826494),
(19, 0.04227647855354252),
(17, 0.041685661662754295)],
'name': '<NAME>'},
208: {'explanation': [(29, 0.14419601354489464),
(17, 0.11785174500536676),
(36, 0.1000501679652906),
(10, 0.09679790134851017),
(35, 0.08710376081189208)],
'name': '<NAME>'},
852: {'explanation': [(8, -0.02486237985832769),
(3, -0.022559886154747102),
(11, -0.021878686669239856),
(36, 0.021847953817988534),
(19, -0.018317598300716522)],
'name': '<NAME>'}},
{207: {'explanation': [(37, 0.08098729255605368),
(35, 0.06639102704982619),
(15, 0.06033721190370432),
(34, 0.05826267856117829),
(28, 0.05549505160798173)],
'name': '<NAME>'},
208: {'explanation': [(17, 0.13839012042250542),
(10, 0.11312187488346881),
(7, 0.10729071207480922),
(25, -0.09529127965797404),
(11, -0.09279834572979286)],
'name': '<NAME>'},
852: {'explanation': [(3, -0.028385651836694076),
(22, 0.023364702783498722),
(8, -0.023097812578270233),
(30, -0.022931236620034406),
(37, -0.022040170736525342)],
'name': '<NAME>'}}
]
EXP_TAB = {
'setosa&0&0': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&1': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&2': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&3': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&4': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&5': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&6': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&7': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&8': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&9': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&10': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&11': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&12': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&13': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&14': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&15': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&16': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&17': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&18': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&19': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&20': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&21': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&22': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&23': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&24': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&25': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&26': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&27': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&28': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&29': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&30': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&31': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&32': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&33': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&34': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&35': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&36': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&37': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&38': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&39': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&40': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&41': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&42': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&43': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&44': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&45': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&46': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&47': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&48': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&49': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&50': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&51': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&52': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&53': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&54': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&55': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&56': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&57': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&58': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&59': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&60': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&61': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&62': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&63': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&64': np.array([0.3094460464703627, 0.11400643817329122]),
'setosa&0&65': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&66': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&67': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&68': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&69': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&70': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&71': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&72': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&73': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&74': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&75': np.array([0.0, 0.95124502153736]),
'setosa&0&76': np.array([0.0, 0.9708703761803881]),
'setosa&0&77': np.array([0.0, 0.5659706098422994]),
'setosa&0&78': np.array([0.0, 0.3962828716108186]),
'setosa&0&79': np.array([0.0, 0.2538069363248767]),
'setosa&0&80': np.array([0.0, 0.95124502153736]),
'setosa&0&81': np.array([0.0, 0.95124502153736]),
'setosa&0&82': np.array([0.0, 0.95124502153736]),
'setosa&0&83': np.array([0.0, 0.95124502153736]),
'setosa&0&84': np.array([0.0, 0.9708703761803881]),
'setosa&0&85': np.array([0.0, 0.9708703761803881]),
'setosa&0&86': np.array([0.0, 0.9708703761803881]),
'setosa&0&87': np.array([0.0, 0.5659706098422994]),
'setosa&0&88': np.array([0.0, 0.5659706098422994]),
'setosa&0&89': np.array([0.0, 0.3962828716108186]),
'setosa&0&90': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&91': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&92': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&93': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&94': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&95': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&96': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&97': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&98': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&99': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&100': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&101': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&102': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&103': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&104': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&105': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&106': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&107': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&108': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&109': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&110': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&111': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&112': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&113': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&114': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&115': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&116': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&117': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&118': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&119': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&120': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&121': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&122': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&123': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&124': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&125': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&126': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&127': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&128': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&129': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&130': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&131': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&132': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&133': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&134': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&135': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&136': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&137': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&138': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&139': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&140': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&141': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&142': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&143': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&144': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&145': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&146': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&147': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&148': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&149': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&150': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&151': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&152': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&153': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&154': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&155': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&156': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&157': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&158': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&159': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&160': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&161': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&162': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&163': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&164': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&165': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&166': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&167': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&168': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&169': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&170': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&171': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&172': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&173': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&174': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&175': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&176': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&177': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&178': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&179': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&180': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&181': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&182': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&183': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&184': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&185': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&186': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&187': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&188': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&189': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&190': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&191': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&192': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&193': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&194': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&195': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&196': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&197': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&198': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&199': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&200': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&201': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&202': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&203': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&204': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&205': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&206': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&207': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&208': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&209': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&210': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&211': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&212': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&213': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&214': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&215': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&216': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&217': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&218': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&219': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&220': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&221': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&222': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&223': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&224': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&225': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&226': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&227': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&228': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&229': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&230': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&231': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&232': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&233': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&234': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&235': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&236': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&237': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&238': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&239': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&240': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&241': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&242': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&243': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&244': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&245': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&246': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&247': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&248': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&249': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&250': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&251': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&252': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&253': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&254': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&255': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&256': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&257': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&258': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&259': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&260': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&261': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&262': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&263': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&264': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&265': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&266': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&267': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&268': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&269': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&270': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&271': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&272': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&273': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&274': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&275': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&276': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&277': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&278': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&279': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&280': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&281': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&282': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&283': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&284': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&285': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&286': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&287': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&288': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&289': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&290': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&291': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&292': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&293': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&294': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&295': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&296': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&297': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&298': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&299': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&300': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&301': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&302': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&303': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&304': np.array([0.3094460464703627, 0.11400643817329122]),
'setosa&0&305': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&306': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&307': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&308': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&309': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&310': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&311': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&312': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&313': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&314': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&1&0': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&1': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&2': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&3': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&4': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&5': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&6': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&7': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&8': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&9': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&10': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&11': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&12': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&13': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&14': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&15': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&16': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&17': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&18': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&19': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&20': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&21': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&22': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&23': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&24': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&25': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&26': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&27': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&28': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&29': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&30': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&31': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&32': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&33': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&34': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&35': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&36': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&37': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&38': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&39': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&40': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&41': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&42': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&43': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&44': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&45': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&46': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&47': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&48': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&49': np.array([0.4079256832347186, 0.038455640985860955]),
'setosa&1&50': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&51': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&52': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&53': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&54': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&55': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&56': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&57': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&58': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&59': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&60': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&61': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&62': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&63': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&1&64': np.array([0.3093950298647913, 0.1140298206733954]),
'setosa&1&65': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&66': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&67': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&68': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&69': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&70': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&71': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&72': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&73': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&74': | np.array([0.13717260713320106, 0.3627779907901665]) | numpy.array |
import torch
import matplotlib.pyplot as plt
import numpy as np
from skimage import io as img
from skimage import color, filters, morphology
import os
import glob
from PIL import Image
import torchvision.transforms as transforms
from . import keypoint_functions
def makedir(path):
try:
os.makedirs(path)
except OSError:
pass
def denorm(x):
if torch.min(x) < -1 or torch.max(x) > 1:
return _normalize(x)
out = (x + 1) / 2
return out.clamp(0, 1)
def norm(x):
out = (x - 0.5) * 2
return out.clamp(-1, 1)
def _normalize(tensor):
tensor = tensor.clone() # avoid modifying tensor in-place
def norm_ip(img, min, max):
img.clamp_(min=min, max=max)
return img.add_(-min).div_(max - min + 1e-5)
def norm_range(t):
return norm_ip(t, float(t.min()), float(t.max()))
tensor = norm_range(tensor)
return tensor
def convert_image_np(inp):
    if inp.shape[1] == 3:
        inp = denorm(inp)
        inp = inp[-1, :, :, :].to(torch.device('cpu'))
        inp = inp.numpy().transpose((1, 2, 0))
    else:
        inp = denorm(inp)
        inp = inp[-1, -1, :, :].to(torch.device('cpu'))
        inp = inp.numpy().transpose((0, 1))
    inp = np.clip(inp, 0, 1)
return inp
def save_image(name, image):
plt.imsave(name, convert_image_np(image), vmin=0, vmax=1)
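# Editor's sketch (assumes image batches are torch tensors in NCHW layout with
# values in [-1, 1]): denorm() maps [-1, 1] -> [0, 1] and norm() is its inverse,
# so a round trip reproduces the input up to clamping and float error.
def _example_norm_roundtrip():
    x = torch.rand(1, 3, 4, 4) * 2 - 1  # hypothetical image batch in [-1, 1]
    assert torch.allclose(norm(denorm(x)), x, atol=1e-6)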
def read_images_and_keypoints(opt):
imgs = glob.glob(os.path.join(opt.dataroot, "*.jpg")) + glob.glob(os.path.join(opt.dataroot, "*.png")) + glob.glob(os.path.join(opt.dataroot, "*.jpeg"))
keypoints = keypoint_functions.load_keypoints(opt)
images = []
keypoints_1d = []
keypoints_2d = []
num_kps = opt.num_keypoints
# load images and corresponding keypoints
for _img in sorted(imgs):
name = _img.split("/")[-1].split(".")[0]
x = img.imread(_img)
x = x[:, :, :3]
# automatically construct the mask based on background color
if opt.mask:
save_dir = os.path.join(opt.dir2save, "masks")
makedir(save_dir)
alpha = np.ones_like(x[:, :, 0])
alpha[np.isclose(np.mean(x, axis=2), opt.bkg_color, rtol=1e-1)] = 0
alpha = np.array(alpha, dtype=bool)
alpha = morphology.remove_small_objects(alpha, 10, connectivity=1)
alpha = morphology.remove_small_holes(alpha, 2, connectivity=2)
alpha = np.array(alpha, dtype=float)
alpha = np.expand_dims(alpha, -1)
alpha_img = np.repeat(alpha, 3, axis=2)
alpha = alpha * 255
plt.imsave(os.path.join(save_dir, "mask_{}.jpg".format(name)), alpha_img, vmin=0, vmax=255)
alpha = alpha.astype(np.uint8)
# load corresponding keypoints for current image
try:
img_keypoints = keypoints[_img.split("/")[-1]]
except KeyError:
print("Found no matching keypoints for {}...skipping this image.".format(name))
continue
# normalize keypoint conditioning
x_condition = keypoint_functions.create_keypoint_condition(x, img_keypoints, opt, num_kps)
x_condition = (x_condition + 1) / 2.0
if opt.mask:
x = np.concatenate([x, alpha], -1)
images.append(x)
keypoints_1d.append(img_keypoints)
keypoints_2d.append(x_condition)
return images, keypoints_1d, keypoints_2d
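# Editor's sketch of a possible call site; the option fields below are assumptions
# inferred from the attribute accesses above (plus whatever keypoint_functions
# expects), not a documented interface.
def _example_load_dataset():
    from argparse import Namespace
    opt = Namespace(dataroot="data/images", dir2save="results", mask=False,
                    bkg_color=255, num_keypoints=17)  # hypothetical values
    return read_images_and_keypoints(opt)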
def generate_keypoint_condition(kps, opt):
a_path_rgb = np.zeros((opt.image_size_y, opt.image_size_x, 3))
colors = keypoint_functions.get_keypoint_colors()
keypoint_layers = keypoint_functions.load_layer_information(opt)
kps_2d = keypoint_functions.create_keypoint_condition(a_path_rgb, kps, opt, num_keypoints=opt.num_keypoints)
kps_2d = torch.from_numpy(kps_2d)
kps_2d = (kps_2d + 1) / 2.0
    # the keypoint condition for an image is now a list with one entry per layer,
    # each holding the 2D keypoint maps that belong to that layer
layered_keypoints_2d = []
for layer in keypoint_layers:
layered_keypoints_2d.append(kps_2d[[layer], :, :].squeeze())
kps_2d = layered_keypoints_2d
layered_keypoints_1d = []
for layer in keypoint_layers:
current_keypoint_1d = {x: kps[x] for x in layer}
layered_keypoints_1d.append(current_keypoint_1d)
kps = layered_keypoints_1d
transform_list = []
transform_list += [transforms.ToTensor()]
transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
transform = transforms.Compose(transform_list)
img_label = []
for layer_idx in range(len(keypoint_layers)):
a_path_rgb = np.zeros((3, opt.image_size_y, opt.image_size_x))
for idx in range(kps_2d[layer_idx].shape[0]):
current_kp = np.expand_dims(kps_2d[layer_idx][idx], 0)
current_color = np.zeros_like(a_path_rgb)
current_color[0] = colors[idx][0]
current_color[1] = colors[idx][1]
current_color[2] = colors[idx][2]
a_path_rgb = a_path_rgb + ( | np.repeat(current_kp, repeats=3, axis=0) | numpy.repeat |
import numpy as np
import scipy as sp
import scipy.stats
def _calculate_number_alleles(G):
G = np.asarray(G, int)
assert len(G.shape) == 2
u = np.unique(G[:])
assert np.all([ui in [0, 1, 2] for ui in u])
b = np.sum(G, axis=0)
a = G.shape[0]*2 - b
return (a, b)
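# Editor's note: for genotypes coded as 0/1/2 copies of one allele, b counts that
# allele per SNP and a the complementary allele, so a + b == 2 * n_samples. A tiny
# illustrative check with values chosen by the editor:
def _example_allele_counts():
    G = np.array([[0, 1, 2],
                  [1, 1, 2]])
    a, b = _calculate_number_alleles(G)
    assert np.array_equal(b, [1, 2, 4]) and np.array_equal(a, [3, 2, 0])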
def _normalize_maf_allele(G):
(a, b) = _calculate_number_alleles(G)
change = b > a
G[:, change] = 2 - G[:, change]
def _calculate_maf(G):
return np.sum(G, 0) / float(2*G.shape[0])
# According to GCTA's paper
def grm_unbiased(G):
_normalize_maf_allele(G)
p = _calculate_maf(G)
denom = 2 * p * (1. - p)
n = G.shape[0]
K = np.zeros((n, n))
for j in range(n-1):
for k in range(j+1, n):
v0 = (G[j, :] - 2*p) * (G[k, :] - 2*p)
K[j, k] = np.mean(v0 / denom)
K[k, j] = K[j, k]
for j in range(n):
g2 = G[j, :]**2
v0 = g2 - (1 + 2*p) * G[j, :] + 2 * p**2
K[j, j] = 1 + np.mean(v0 / denom)
return K
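# Editor's note: the loops above implement the GCTA GRM estimator referenced in the
# comment above; with x_ij the allele count of SNP i in individual j and p_i its
# sample frequency,
#   A_jk = mean_i[(x_ij - 2 p_i)(x_ik - 2 p_i) / (2 p_i (1 - p_i))]            (j != k)
#   A_jj = 1 + mean_i[(x_ij^2 - (1 + 2 p_i) x_ij + 2 p_i^2) / (2 p_i (1 - p_i))]
# The sketch below vectorises the off-diagonal part only and is assumed, not proven
# here, to match grm_unbiased away from the diagonal.
def _grm_offdiag_vectorised(G):
    _normalize_maf_allele(G)
    p = _calculate_maf(G)
    Z = (G - 2 * p) / np.sqrt(2 * p * (1 - p))
    return Z.dot(Z.T) / G.shape[1]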
if __name__ == '__main__':
# np.random.seed(5)
# # G = np.random.randint(0, 3, (100000, 1))
# G = sp.stats.binom.rvs(2, 0.5, size=(1000, 10))
# # _calculate_maf(G)
# K = grm_unbiased(G)
# import ipdb; ipdb.set_trace()
# print K
import numpy as np
import scipy as sp
import scipy.stats
np.random.seed(0)
N = 5
nfrX = sp.stats.binom.rvs(2, 0.3, size=(N, 10))
nbgX = sp.stats.binom.rvs(2, 0.5, size=(N, 10))
y = | np.random.randint(0, 2, size=N) | numpy.random.randint |
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 23 14:51:28 2016
@author: <NAME>
"""
import os
import numpy as np
payoff_dir = "data/"
paddy_price = 100000
ofc_price = 120000
sd = np.repeat(1000, 72)
#multiplicative effects on price received
agrowell_effect = 1.2 # 1 is no effect
water_scarce_agrowell = 0.7 # i.e. reduces profit by 30 percent
water_scarce_no_agrowell = 0.2 # low tank, low rainfall, reduces profit by 80 percent
water_damage_effect = 0.2 # reduces ofc profit by 80 percent
bethma_effect = 0.5 # reduces profit by 50 percent
ofc_bethma_effect = 0.1 # slashes payoffs, just to ensure no one opts for this option
#additive bethma effects
agrowell_bm_effect = 0.1 # changes effect on profit by 0.1
high_rf_bm_effect = 0.1 # changes effect on profit by 0.1
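# Editor's note: the *_effect constants above act multiplicatively on the price
# received, e.g. a hypothetical agrowell owner facing water scarcity on paddy gets
# roughly paddy_price * agrowell_effect * water_scarce_agrowell
# = 100000 * 1.2 * 0.7 = 84000, whereas the two *_bm_effect terms are additive
# adjustments to the bethma modifier. This reading is inferred from the comments,
# not from payoff-generation code shown here.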
#build dataset
tank_level = np.repeat((1,2,3), 24)
rainfall = np.tile(np.repeat((1,2,3), 8), 3)
crop_choice = np.tile(np.repeat((1,0), 4), 9)
bethma_choice = np.tile((1,0), 36)
agrowell_owner = | np.tile((1,0,0,1), 18) | numpy.tile |
"""
Group-wise function alignment using SRSF framework and Dynamic Programming
moduleauthor:: <NAME> <<EMAIL>>
"""
import numpy as np
import matplotlib.pyplot as plt
import fdasrsf.utility_functions as uf
import fdasrsf.bayesian_functions as bf
import fdasrsf.fPCA as fpca
import fdasrsf.geometry as geo
from scipy.integrate import trapz, cumtrapz
from scipy.interpolate import interp1d
from scipy.linalg import svd, cholesky
from scipy.cluster.hierarchy import linkage, fcluster
from scipy.spatial.distance import squareform, pdist
import GPy
from numpy.linalg import norm, inv
from numpy.random import rand, normal
from joblib import Parallel, delayed
from fdasrsf.fPLS import pls_svd
from tqdm import tqdm
import fdasrsf.plot_style as plot
import fpls_warp as fpls
import collections
class fdawarp:
"""
This class provides alignment methods for functional data using the SRVF framework
Usage: obj = fdawarp(f,t)
:param f: (M,N): matrix defining N functions of M samples
:param time: time vector of length M
:param fn: aligned functions
:param qn: aligned srvfs
:param q0: initial srvfs
:param fmean: function mean
:param mqn: mean srvf
:param gam: warping functions
:param psi: srvf of warping functions
:param stats: alignment statistics
:param qun: cost function
    :param lambda: elasticity parameter used during alignment
:param method: optimization method
:param gamI: inverse warping function
:param rsamps: random samples
:param fs: random aligned functions
:param gams: random warping functions
:param ft: random warped functions
:param qs: random aligned srvfs
:param type: alignment type
:param mcmc: mcmc output if bayesian
Author : <NAME> (JDT) <jdtuck AT sandia.gov>
Date : 15-Mar-2018
"""
def __init__(self, f, time):
"""
Construct an instance of the fdawarp class
:param f: numpy ndarray of shape (M,N) of N functions with M samples
:param time: vector of size M describing the sample points
"""
a = time.shape[0]
if f.shape[0] != a:
            raise Exception('The number of rows of f must equal the length of time')
self.f = f
self.time = time
self.rsamps = False
def srsf_align(self, method="mean", omethod="DP2", center=True,
smoothdata=False, MaxItr=20, parallel=False, lam=0.0,
cores=-1, grid_dim=7):
"""
This function aligns a collection of functions using the elastic
square-root slope (srsf) framework.
        :param method: (string) whether to calculate the Karcher Mean or Median
                       (options = "mean" or "median") (default="mean")
:param omethod: optimization method (DP, DP2, RBFGS) (default = DP2)
:param center: center warping functions (default = T)
:param smoothdata: Smooth the data using a box filter (default = F)
:param MaxItr: Maximum number of iterations (default = 20)
:param parallel: run in parallel (default = F)
:param lam: controls the elasticity (default = 0)
:param cores: number of cores for parallel (default = -1 (all))
:param grid_dim: size of the grid, for the DP2 method only (default = 7)
:type lam: double
:type smoothdata: bool
Examples
>>> import tables
>>> fun=tables.open_file("../Data/simu_data.h5")
>>> f = fun.root.f[:]
>>> f = f.transpose()
>>> time = fun.root.time[:]
>>> obj = fs.fdawarp(f,time)
>>> obj.srsf_align()
"""
M = self.f.shape[0]
N = self.f.shape[1]
self.lam = lam
if M > 500:
parallel = True
elif N > 100:
parallel = True
eps = np.finfo(np.double).eps
f0 = self.f
self.method = omethod
methods = ["mean", "median"]
self.type = method
# 0 mean, 1-median
method = [i for i, x in enumerate(methods) if x == method]
if len(method) == 0:
method = 0
else:
method = method[0]
# Compute SRSF function from data
f, g, g2 = uf.gradient_spline(self.time, self.f, smoothdata)
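        # Editor's note: q is the square-root slope function (SRSF) of f,
        # q(t) = f'(t) / sqrt(|f'(t)|); eps guards against division by zero
        # where the derivative vanishes.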
q = g / np.sqrt(abs(g) + eps)
print("Initializing...")
mnq = q.mean(axis=1)
a = mnq.repeat(N)
d1 = a.reshape(M, N)
d = (q - d1) ** 2
dqq = np.sqrt(d.sum(axis=0))
min_ind = dqq.argmin()
mq = q[:, min_ind]
mf = f[:, min_ind]
if parallel:
out = Parallel(n_jobs=cores)(delayed(uf.optimum_reparam)(mq, self.time,
q[:, n], omethod, lam, grid_dim) for n in range(N))
gam = np.array(out)
gam = gam.transpose()
else:
gam = np.zeros((M,N))
for k in range(0,N):
gam[:,k] = uf.optimum_reparam(mq,self.time,q[:,k],omethod,lam,grid_dim)
gamI = uf.SqrtMeanInverse(gam)
mf = np.interp((self.time[-1] - self.time[0]) * gamI + self.time[0], self.time, mf)
mq = uf.f_to_srsf(mf, self.time)
# Compute Karcher Mean
        if method == 0:
            print("Compute Karcher Mean of %d functions in SRSF space..." % N)
        if method == 1:
            print("Compute Karcher Median of %d functions in SRSF space..." % N)
ds = np.repeat(0.0, MaxItr + 2)
ds[0] = np.inf
qun = np.repeat(0.0, MaxItr + 1)
tmp = np.zeros((M, MaxItr + 2))
tmp[:, 0] = mq
mq = tmp
tmp = np.zeros((M, MaxItr+2))
tmp[:,0] = mf
mf = tmp
tmp = np.zeros((M, N, MaxItr + 2))
tmp[:, :, 0] = self.f
f = tmp
tmp = np.zeros((M, N, MaxItr + 2))
tmp[:, :, 0] = q
q = tmp
for r in range(0, MaxItr):
print("updating step: r=%d" % (r + 1))
if r == (MaxItr - 1):
print("maximal number of iterations is reached")
# Matching Step
if parallel:
out = Parallel(n_jobs=cores)(delayed(uf.optimum_reparam)(mq[:, r],
self.time, q[:, n, 0], omethod, lam, grid_dim) for n in range(N))
gam = np.array(out)
gam = gam.transpose()
else:
for k in range(0,N):
gam[:,k] = uf.optimum_reparam(mq[:, r], self.time, q[:, k, 0],
omethod, lam, grid_dim)
gam_dev = np.zeros((M, N))
vtil = np.zeros((M,N))
dtil = np.zeros(N)
for k in range(0, N):
f[:, k, r + 1] = np.interp((self.time[-1] - self.time[0]) * gam[:, k]
+ self.time[0], self.time, f[:, k, 0])
q[:, k, r + 1] = uf.f_to_srsf(f[:, k, r + 1], self.time)
gam_dev[:, k] = np.gradient(gam[:, k], 1 / float(M - 1))
v = q[:, k, r + 1] - mq[:,r]
d = np.sqrt(trapz(v*v, self.time))
vtil[:,k] = v/d
dtil[k] = 1.0/d
mqt = mq[:, r]
a = mqt.repeat(N)
d1 = a.reshape(M, N)
d = (q[:, :, r + 1] - d1) ** 2
if method == 0:
d1 = sum(trapz(d, self.time, axis=0))
d2 = sum(trapz((1 - np.sqrt(gam_dev)) ** 2, self.time, axis=0))
ds_tmp = d1 + lam * d2
ds[r + 1] = ds_tmp
# Minimization Step
# compute the mean of the matched function
qtemp = q[:, :, r + 1]
ftemp = f[:, :, r + 1]
mq[:, r + 1] = qtemp.mean(axis=1)
mf[:, r + 1] = ftemp.mean(axis=1)
qun[r] = norm(mq[:, r + 1] - mq[:, r]) / norm(mq[:, r])
if method == 1:
d1 = np.sqrt(sum(trapz(d, self.time, axis=0)))
d2 = sum(trapz((1 - np.sqrt(gam_dev)) ** 2, self.time, axis=0))
ds_tmp = d1 + lam * d2
ds[r + 1] = ds_tmp
# Minimization Step
# compute the mean of the matched function
stp = .3
vbar = vtil.sum(axis=1)*(1/dtil.sum())
qtemp = q[:, :, r + 1]
ftemp = f[:, :, r + 1]
mq[:, r + 1] = mq[:,r] + stp*vbar
tmp = np.zeros(M)
tmp[1:] = cumtrapz(mq[:, r + 1] * np.abs(mq[:, r + 1]), self.time)
mf[:, r + 1] = np.median(f0[1, :])+tmp
qun[r] = norm(mq[:, r + 1] - mq[:, r]) / norm(mq[:, r])
if qun[r] < 1e-2 or r >= MaxItr:
break
# Last Step with centering of gam
if center:
r += 1
if parallel:
out = Parallel(n_jobs=cores)(delayed(uf.optimum_reparam)(mq[:, r], self.time,
q[:, n, 0], omethod, lam, grid_dim) for n in range(N))
gam = np.array(out)
gam = gam.transpose()
else:
for k in range(0,N):
gam[:,k] = uf.optimum_reparam(mq[:, r], self.time, q[:, k, 0], omethod,
lam, grid_dim)
gam_dev = np.zeros((M, N))
for k in range(0, N):
gam_dev[:, k] = np.gradient(gam[:, k], 1 / float(M - 1))
gamI = uf.SqrtMeanInverse(gam)
gamI_dev = np.gradient(gamI, 1 / float(M - 1))
time0 = (self.time[-1] - self.time[0]) * gamI + self.time[0]
mq[:, r + 1] = np.interp(time0, self.time, mq[:, r]) * np.sqrt(gamI_dev)
for k in range(0, N):
q[:, k, r + 1] = np.interp(time0, self.time, q[:, k, r]) * np.sqrt(gamI_dev)
f[:, k, r + 1] = np.interp(time0, self.time, f[:, k, r])
gam[:, k] = np.interp(time0, self.time, gam[:, k])
else:
gamI = uf.SqrtMeanInverse(gam)
gamI_dev = np.gradient(gamI, 1 / float(M - 1))
# Aligned data & stats
self.fn = f[:, :, r + 1]
self.qn = q[:, :, r + 1]
self.q0 = q[:, :, 0]
mean_f0 = f0.mean(axis=1)
std_f0 = f0.std(axis=1)
mean_fn = self.fn.mean(axis=1)
std_fn = self.fn.std(axis=1)
self.gam = gam
self.mqn = mq[:, r + 1]
tmp = np.zeros(M)
tmp[1:] = cumtrapz(self.mqn * np.abs(self.mqn), self.time)
self.fmean = np.mean(f0[1, :]) + tmp
fgam = np.zeros((M, N))
for k in range(0, N):
time0 = (self.time[-1] - self.time[0]) * gam[:, k] + self.time[0]
fgam[:, k] = np.interp(time0, self.time, self.fmean)
var_fgam = fgam.var(axis=1)
self.orig_var = trapz(std_f0 ** 2, self.time)
self.amp_var = trapz(std_fn ** 2, self.time)
self.phase_var = trapz(var_fgam, self.time)
return
def plot(self):
"""
plot plot functional alignment results
Usage: obj.plot()
"""
M = self.f.shape[0]
plot.f_plot(self.time, self.f, title="f Original Data")
fig, ax = plot.f_plot(np.arange(0, M) / float(M - 1), self.gam,
title="Warping Functions")
ax.set_aspect('equal')
plot.f_plot(self.time, self.fn, title="Warped Data")
mean_f0 = self.f.mean(axis=1)
std_f0 = self.f.std(axis=1)
mean_fn = self.fn.mean(axis=1)
std_fn = self.fn.std(axis=1)
tmp = np.array([mean_f0, mean_f0 + std_f0, mean_f0 - std_f0])
tmp = tmp.transpose()
plot.f_plot(self.time, tmp, title=r"Original Data: Mean $\pm$ STD")
tmp = np.array([mean_fn, mean_fn + std_fn, mean_fn - std_fn])
tmp = tmp.transpose()
plot.f_plot(self.time, tmp, title=r"Warped Data: Mean $\pm$ STD")
plot.f_plot(self.time, self.fmean, title="$f_{mean}$")
plt.show()
return
def gauss_model(self, n=1, sort_samples=False):
"""
This function models the functional data using a Gaussian model
extracted from the principal components of the srvfs
:param n: number of random samples
:param sort_samples: sort samples (default = T)
:type n: integer
:type sort_samples: bool
"""
fn = self.fn
time = self.time
qn = self.qn
gam = self.gam
# Parameters
eps = np.finfo(np.double).eps
binsize = np.diff(time)
binsize = binsize.mean()
M = time.size
# compute mean and covariance in q-domain
mq_new = qn.mean(axis=1)
        mididx = int(np.round(time.shape[0] / 2))  # integer index for the midpoint sample
m_new = np.sign(fn[mididx, :]) * np.sqrt(np.abs(fn[mididx, :]))
mqn = np.append(mq_new, m_new.mean())
qn2 = np.vstack((qn, m_new))
C = np.cov(qn2)
q_s = np.random.multivariate_normal(mqn, C, n)
q_s = q_s.transpose()
# compute the correspondence to the original function domain
fs = np.zeros((M, n))
for k in range(0, n):
fs[:, k] = uf.cumtrapzmid(time, q_s[0:M, k] * np.abs(q_s[0:M, k]),
np.sign(q_s[M, k]) * (q_s[M, k] ** 2),
mididx)
fbar = fn.mean(axis=1)
fsbar = fs.mean(axis=1)
err = np.transpose(np.tile(fbar-fsbar, (n,1)))
fs += err
# random warping generation
rgam = uf.randomGamma(gam, n)
gams = np.zeros((M, n))
for k in range(0, n):
gams[:, k] = uf.invertGamma(rgam[:, k])
# sort functions and warping
if sort_samples:
mx = fs.max(axis=0)
seq1 = mx.argsort()
# compute the psi-function
fy = np.gradient(rgam, binsize)
psi = fy / np.sqrt(abs(fy) + eps)
ip = np.zeros(n)
            arclen = np.zeros(n)  # renamed to avoid shadowing the builtin len()
            for i in range(0, n):
                tmp = np.ones(M)
                ip[i] = tmp.dot(psi[:, i] / M)
                arclen[i] = np.arccos(tmp.dot(psi[:, i] / M))
            seq2 = arclen.argsort()
# combine x-variability and y-variability
ft = np.zeros((M, n))
for k in range(0, n):
ft[:, k] = np.interp(gams[:, seq2[k]], np.arange(0, M) /
np.double(M - 1), fs[:, seq1[k]])
tmp = np.isnan(ft[:, k])
while tmp.any():
rgam2 = uf.randomGamma(gam, 1)
ft[:, k] = np.interp(gams[:, seq2[k]], np.arange(0, M) /
np.double(M - 1), uf.invertGamma(rgam2))
else:
# combine x-variability and y-variability
ft = np.zeros((M, n))
for k in range(0, n):
ft[:, k] = np.interp(gams[:, k], np.arange(0, M) /
np.double(M - 1), fs[:, k])
tmp = np.isnan(ft[:, k])
while tmp.any():
rgam2 = uf.randomGamma(gam, 1)
ft[:, k] = np.interp(gams[:, k], np.arange(0, M) /
np.double(M - 1), uf.invertGamma(rgam2))
self.rsamps = True
self.fs = fs
self.gams = rgam
self.ft = ft
self.qs = q_s[0:M,:]
return
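# Usage sketch (illustrative, assumes `obj` is an already-aligned instance of this class,
# e.g. after obj.srsf_align()):
#   obj.gauss_model(n=50, sort_samples=False)
#   samples = obj.ft   # (M, 50) functions combining sampled amplitude and phase variability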
def joint_gauss_model(self, n=1, no=3):
"""
This function models the functional data using a joint Gaussian model
extracted from the principal components of the srsfs
:param n: number of random samples
:param no: number of principal components (default = 3)
:type n: integer
:type no: integer
"""
# Parameters
fn = self.fn
time = self.time
qn = self.qn
gam = self.gam
M = time.size
# Perform PCA
jfpca = fpca.fdajpca(self)
jfpca.calc_fpca(no=no)
s = jfpca.latent
U = jfpca.U
C = jfpca.C
mu_psi = jfpca.mu_psi
# compute mean and covariance
mq_new = qn.mean(axis=1)
mididx = jfpca.id
m_new = np.sign(fn[mididx, :]) * np.sqrt(np.abs(fn[mididx, :]))
mqn = np.append(mq_new, m_new.mean())
# generate random samples
vals = np.random.multivariate_normal(np.zeros(s.shape), np.diag(s), n)
tmp = np.matmul(U, np.transpose(vals))
qhat = np.tile(mqn.T,(n,1)).T + tmp[0:M+1,:]
tmp = np.matmul(U, np.transpose(vals)/C)
vechat = tmp[(M+1):,:]
psihat = np.zeros((M,n))
gamhat = np.zeros((M,n))
for ii in range(n):
psihat[:,ii] = geo.exp_map(mu_psi,vechat[:,ii])
gam_tmp = cumtrapz(psihat[:,ii]**2,np.linspace(0,1,M),initial=0.0)
gamhat[:,ii] = (gam_tmp - gam_tmp.min())/(gam_tmp.max()-gam_tmp.min())
ft = np.zeros((M,n))
fhat = np.zeros((M,n))
for ii in range(n):
fhat[:,ii] = uf.cumtrapzmid(time, qhat[0:M,ii]*np.fabs(qhat[0:M,ii]), np.sign(qhat[M,ii])*(qhat[M,ii]*qhat[M,ii]), mididx)
ft[:,ii] = uf.warp_f_gamma(np.linspace(0,1,M),fhat[:,ii],gamhat[:,ii])
self.rsamps = True
self.fs = fhat
self.gams = gamhat
self.ft = ft
self.qs = qhat[0:M,:]
return
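# Usage sketch (illustrative, assumes `obj` has already been aligned):
#   obj.joint_gauss_model(n=50, no=3)
#   ft_samples = obj.ft     # (M, 50) sampled functions
#   gam_samples = obj.gams  # (M, 50) sampled warping functions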
def multiple_align_functions(self, mu, omethod="DP2", smoothdata=False,
parallel=False, lam=0.0, cores=-1, grid_dim=7):
"""
This function aligns a collection of functions using the elastic square-root
slope (srsf) framework.
Usage: obj.multiple_align_functions(mu)
obj.multiple_align_functions(mu, omethod)
obj.multiple_align_functions(mu, omethod, ...)
:param mu: vector of function to align to
:param omethod: optimization method (DP, DP2, RBFGS) (default = DP2)
:param smoothdata: Smooth the data using a box filter (default = F)
:param parallel: run in parallel (default = F)
:param lam: controls the elasticity (default = 0)
:param cores: number of cores for parallel (default = -1 (all))
:param grid_dim: size of the grid, for the DP2 method only (default = 7)
:type lam: double
:type smoothdata: bool
"""
M = self.f.shape[0]
N = self.f.shape[1]
self.lam = lam
if M > 500:
parallel = True
elif N > 100:
parallel = True
eps = np.finfo(np.double).eps
self.method = omethod
self.type = "multiple"
# Compute SRSF function from data
f, g, g2 = uf.gradient_spline(self.time, self.f, smoothdata)
q = g / np.sqrt(abs(g) + eps)
mq = uf.f_to_srsf(mu, self.time)
if parallel:
out = Parallel(n_jobs=cores)(delayed(uf.optimum_reparam)(mq, self.time,
q[:, n], omethod, lam, grid_dim) for n in range(N))
gam = np.array(out)
gam = gam.transpose()
else:
gam = np.zeros((M,N))
for k in range(0,N):
gam[:,k] = uf.optimum_reparam(mq,self.time,q[:,k],omethod,lam,grid_dim)
self.gamI = uf.SqrtMeanInverse(gam)
fn = np.zeros((M,N))
qn = np.zeros((M,N))
for k in range(0, N):
fn[:, k] = np.interp((self.time[-1] - self.time[0]) * gam[:, k]
+ self.time[0], self.time, f[:, k])
qn[:, k] = uf.f_to_srsf(fn[:, k], self.time)
# Aligned data & stats
self.fn = fn
self.qn = qn
self.q0 = q
mean_f0 = f.mean(axis=1)
std_f0 = f.std(axis=1)
mean_fn = self.fn.mean(axis=1)
std_fn = self.fn.std(axis=1)
self.gam = gam
self.mqn = mq
self.fmean = mu
fgam = np.zeros((M, N))
for k in range(0, N):
time0 = (self.time[-1] - self.time[0]) * gam[:, k] + self.time[0]
fgam[:, k] = np.interp(time0, self.time, self.fmean)
var_fgam = fgam.var(axis=1)
self.orig_var = trapz(std_f0 ** 2, self.time)
self.amp_var = trapz(std_fn ** 2, self.time)
self.phase_var = trapz(var_fgam, self.time)
return
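# Usage sketch (illustrative; `mu` is an assumed template of length M matching obj.time):
#   obj.multiple_align_functions(mu, omethod="DP2", lam=0.0)
#   aligned = obj.fn   # (M, N) warped functions
#   warps = obj.gam    # (M, N) estimated warping functions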
def pairwise_align_functions(f1, f2, time, omethod="DP2", lam=0, grid_dim=7):
"""
This function aligns f2 to f1 using the elastic square-root
slope (srsf) framework.
Usage: out = pairwise_align_functions(f1, f2, time)
out = pairwise_align_functions(f1, f2, time, omethod, lam, grid_dim)
:param f1: vector defining M samples of function 1
:param f2: vector defining M samples of function 2
:param time: time vector of length M
:param omethod: optimization method (DP, DP2, RBFGS) (default = DP2)
:param lam: controls the elasticity (default = 0)
:param grid_dim: size of the grid, for the DP2 method only (default = 7)
:rtype list containing
:return f2n: aligned f2
:return gam: warping function
:return q2n: aligned q2 (srsf)
"""
q1 = uf.f_to_srsf(f1, time)
q2 = uf.f_to_srsf(f2, time)
gam = uf.optimum_reparam(q1, time, q2, omethod, lam, grid_dim)
f2n = uf.warp_f_gamma(time, f2 , gam)
q2n = uf.f_to_srsf(f2n, time)
return (f2n, gam, q2n)
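# Usage sketch (illustrative; the toy sine functions below are assumptions, not part of the module):
#   t = np.linspace(0, 1, 101)
#   f1 = np.sin(2 * np.pi * t)
#   f2 = np.sin(2 * np.pi * t ** 1.5)
#   f2n, gam, q2n = pairwise_align_functions(f1, f2, t, omethod="DP2", lam=0, grid_dim=7)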
def pairwise_align_bayes(f1i, f2i, time, mcmcopts=None):
"""
This function aligns two functions using Bayesian framework. It will align
f2 to f1. It is based on mapping warping functions to a hypersphere, and a
subsequent exponential mapping to a tangent space. In the tangent space,
the Z-mixture pCN algorithm is used to explore both local and global
structure in the posterior distribution.
The Z-mixture pCN algorithm uses a mixture distribution for the proposal
distribution, controlled by input parameter zpcn. The zpcn["betas"] must be
between 0 and 1, and are the coefficients of the mixture components, with
larger coefficients corresponding to larger shifts in parameter space. The
zpcn["probs"] give the probability of each shift size.
Usage: out = pairwise_align_bayes(f1i, f2i, time)
out = pairwise_align_bayes(f1i, f2i, time, mcmcopts)
:param f1i: vector defining M samples of function 1
:param f2i: vector defining M samples of function 2
:param time: time vector of length M
:param mcmcopts: dict of mcmc parameters
:type mcmcopts: dict
default mcmc options:
tmp = {"betas":np.array([0.5,0.5,0.005,0.0001]),"probs":np.array([0.1,0.1,0.7,0.1])}
mcmcopts = {"iter":2*(10**4) ,"burnin":np.minimum(5*(10**3),2*(10**4)//2),
"alpha0":0.1, "beta0":0.1,"zpcn":tmp,"propvar":1,
"initcoef":np.repeat(0,20), "npoints":200, "extrainfo":True}
:rtype collection containing
:return f2_warped: aligned f2
:return gamma: warping function
:return g_coef: final g_coef
:return psi: final psi
:return sigma1: final sigma
if extrainfo
:return accept: accept of psi samples
:return betas_ind
:return logl: log likelihood
:return gamma_mat: posterior gammas
:return gamma_stats: posterior gamma stats
:return xdist: phase distance posterior
:return ydist: amplitude distance posterior)
"""
if mcmcopts is None:
tmp = {"betas":np.array([0.5,0.5,0.005,0.0001]),"probs":np.array([0.1,0.1,0.7,0.1])}
mcmcopts = {"iter":2*(10**4) ,"burnin":np.minimum(5*(10**3),2*(10**4)//2),"alpha0":0.1,
"beta0":0.1,"zpcn":tmp,"propvar":1,
"initcoef":np.repeat(0,20), "npoints":200, "extrainfo":True}
if f1i.shape[0] != f2i.shape[0]:
raise Exception('Length of f1 and f2 must be equal')
if f1i.shape[0] != time.shape[0]:
raise Exception('Length of f1 and time must be equal')
if mcmcopts["zpcn"]["betas"].shape[0] != mcmcopts["zpcn"]["probs"].shape[0]:
raise Exception('In zpcn, betas must equal length of probs')
if np.mod(mcmcopts["initcoef"].shape[0], 2) != 0:
raise Exception('Length of mcmcopts.initcoef must be even')
# Number of sig figs to report in gamma_mat
SIG_GAM = 13
iter = mcmcopts["iter"]
# parameter settings
pw_sim_global_burnin = mcmcopts["burnin"]
valid_index = np.arange(pw_sim_global_burnin-1,iter)
pw_sim_global_Mg = mcmcopts["initcoef"].shape[0]//2
g_coef_ini = mcmcopts["initcoef"]
numSimPoints = mcmcopts["npoints"]
pw_sim_global_domain_par = np.linspace(0,1,numSimPoints)
g_basis = uf.basis_fourier(pw_sim_global_domain_par, pw_sim_global_Mg, 1)
sigma1_ini = 1
zpcn = mcmcopts["zpcn"]
pw_sim_global_sigma_g = mcmcopts["propvar"]
def propose_g_coef(g_coef_curr):
pCN_beta = zpcn["betas"]
pCN_prob = zpcn["probs"]
probm = np.insert(np.cumsum(pCN_prob),0,0)
z = np.random.rand()
result = {"prop":g_coef_curr,"ind":1}
for i in range (0,pCN_beta.shape[0]):
if z <= probm[i+1] and z > probm[i]:
g_coef_new = normal(0, pw_sim_global_sigma_g / np.repeat(np.arange(1,pw_sim_global_Mg+1),2))
result["prop"] = np.sqrt(1-pCN_beta[i]**2) * g_coef_curr + pCN_beta[i] * g_coef_new
result["ind"] = i
return result
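# The mixture proposal above is the Z-mixture pCN update: with probability probs[i] it returns
# sqrt(1 - betas[i]**2) * g_coef_curr + betas[i] * g_new, where g_new is a zero-mean Gaussian
# draw whose per-coefficient scale decays with the Fourier-coefficient index.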
# normalize time to [0,1]
time = (time - time.min())/(time.max()-time.min())
timet = np.linspace(0,1,numSimPoints)
f1 = uf.f_predictfunction(f1i,timet,0)
f2 = uf.f_predictfunction(f2i,timet,0)
# srsf transformation
q1 = uf.f_to_srsf(f1,timet)
q1i = uf.f_to_srsf(f1i,time)
q2 = uf.f_to_srsf(f2,timet)
tmp = uf.f_exp1(uf.f_basistofunction(g_basis["x"],0,g_coef_ini,g_basis))
if tmp.min() < 0:
raise Exception("Invalid initial value of g")
# result vectors
g_coef = np.zeros((iter,g_coef_ini.shape[0]))
sigma1 = np.zeros(iter)
logl = np.zeros(iter)
SSE = np.zeros(iter)
accept = np.zeros(iter, dtype=bool)
accept_betas = np.zeros(iter)
# init
g_coef_curr = g_coef_ini
sigma1_curr = sigma1_ini
SSE_curr = bf.f_SSEg_pw(uf.f_basistofunction(g_basis["x"],0,g_coef_ini,g_basis),q1,q2)
logl_curr = bf.f_logl_pw(uf.f_basistofunction(g_basis["x"],0,g_coef_ini,g_basis),q1,q2,sigma1_ini**2,SSE_curr)
g_coef[0,:] = g_coef_ini
sigma1[0] = sigma1_ini
SSE[0] = SSE_curr
logl[0] = logl_curr
# update the chain for iter-1 times
for m in tqdm(range(1,iter)):
# update g
g_coef_curr, tmp, SSE_curr, accepti, zpcnInd = bf.f_updateg_pw(g_coef_curr, g_basis, sigma1_curr**2, q1, q2, SSE_curr, propose_g_coef)
# update sigma1
newshape = q1.shape[0]/2 + mcmcopts["alpha0"]
newscale = 1/2 * SSE_curr + mcmcopts["beta0"]
sigma1_curr = np.sqrt(1/np.random.gamma(newshape,1/newscale))
logl_curr = bf.f_logl_pw(uf.f_basistofunction(g_basis["x"],0,g_coef_curr,g_basis), q1, q2, sigma1_curr**2, SSE_curr)
# save updates to results
g_coef[m,:] = g_coef_curr
sigma1[m] = sigma1_curr
SSE[m] = SSE_curr
if mcmcopts["extrainfo"]:
logl[m] = logl_curr
accept[m] = accepti
accept_betas[m] = zpcnInd
# calculate posterior mean of psi
pw_sim_est_psi_matrix = np.zeros((numSimPoints,valid_index.shape[0]))
for k in range(0,valid_index.shape[0]):
g_temp = uf.f_basistofunction(g_basis["x"],0,g_coef[valid_index[k],:],g_basis)
psi_temp = uf.f_exp1(g_temp)
pw_sim_est_psi_matrix[:,k] = psi_temp
result_posterior_psi_simDomain = uf.f_psimean(pw_sim_global_domain_par, pw_sim_est_psi_matrix)
# resample to same number of points as the input f1 and f2
interp = interp1d(np.linspace(0,1,result_posterior_psi_simDomain.shape[0]), result_posterior_psi_simDomain, fill_value="extrapolate")
result_posterior_psi = interp(np.linspace(0,1,f1i.shape[0]))
# transform posterior mean of psi to gamma
result_posterior_gamma = uf.f_phiinv(result_posterior_psi)
result_posterior_gamma = uf.norm_gam(result_posterior_gamma)
# warped f2
f2_warped = uf.warp_f_gamma(time, f2i, result_posterior_gamma)
if mcmcopts["extrainfo"]:
M,N = pw_sim_est_psi_matrix.shape
gamma_mat = np.zeros((time.shape[0],N))
one_v = np.ones(M)
Dx = np.zeros(N)
Dy = np.zeros(N)  # separate array (aliasing Dx would overwrite the phase distances)
for ii in range(0,N):
interp = interp1d(np.linspace(0,1,result_posterior_psi_simDomain.shape[0]), pw_sim_est_psi_matrix[:,ii], fill_value="extrapolate")
result_i = interp(time)
tmp = uf.f_phiinv(result_i)
gamma_mat[:,ii] = uf.norm_gam(tmp)
v, theta = geo.inv_exp_map(one_v,pw_sim_est_psi_matrix[:,ii])
Dx[ii] = np.sqrt(trapz(v**2,pw_sim_global_domain_par))
q2warp = uf.warp_q_gamma(pw_sim_global_domain_par,q2,gamma_mat[:,ii])
Dy[ii] = np.sqrt(trapz((q1i-q2warp)**2,time))
gamma_stats = uf.statsFun(gamma_mat)
results_o = collections.namedtuple('align_bayes', ['f2_warped', 'gamma','g_coef', 'psi', 'sigma1', 'accept', 'betas_ind', 'logl', 'gamma_mat', 'gamma_stats', 'xdist', 'ydist'])
out = results_o(f2_warped, result_posterior_gamma, g_coef, result_posterior_psi, sigma1, accept[1:], accept_betas[1:], logl, gamma_mat, gamma_stats, Dx, Dy)
return(out)
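# Usage sketch (illustrative; f1, f2 and time are assumed 1-D arrays of equal length):
#   out = pairwise_align_bayes(f1, f2, time)
#   f2_aligned, gamma = out.f2_warped, out.gamma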
def pairwise_align_bayes_infHMC(y1i, y2i, time, mcmcopts=None):
"""
This function aligns two functions using Bayesian framework. It uses a
hierarchical Bayesian framework assuming measurement error. It will
align f2 to f1. It is based on mapping warping functions to a hypersphere,
and a subsequent exponential mapping to a tangent space. In the tangent space,
the \infty-HMC algorithm is used to explore both local and global
structure in the posterior distribution.
Usage: out = pairwise_align_bayes_infHMC(y1i, y2i, time)
out = pairwise_align_bayes_infHMC(y1i, y2i, time, mcmcopts)
:param y1i: vector defining M samples of function 1
:param y2i: vector defining M samples of function 2
:param time: time vector of length M
:param mcmcopts: dict of mcmc parameters
:type mcmcopts: dict
default mcmc options:
mcmcopts = {"iter":1*(10**4), "nchains":4, "vpriorvar":1,
"burnin":np.minimum(5*(10**3),2*(10**4)//2),
"alpha0":0.1, "beta0":0.1, "alpha":1, "beta":1,
"h":0.01, "L":4, "f1propvar":0.0001, "f2propvar":0.0001,
"L1propvar":0.3, "L2propvar":0.3, "npoints":200, "thin":1,
"sampfreq":1, "initcoef":np.repeat(0,20), "nbasis":10,
"basis":'fourier', "extrainfo":True}
Basis can be 'fourier' or 'legendre'
:rtype collection containing
:return f2_warped: aligned f2
:return gamma: warping function
:return v_coef: final v_coef
:return psi: final psi
:return sigma1: final sigma
if extrainfo
:return theta_accept: accept of psi samples
:return f2_accept: accept of f2 samples
:return SSE: SSE
:return gamma_mat: posterior gammas
:return gamma_stats: posterior gamma stats
:return xdist: phase distance posterior
:return ydist: amplitude distance posterior)
<NAME>, <NAME>, and <NAME>. “Multimodal Bayesian Registration of Noisy Functions using Hamiltonian Monte Carlo”, Computational Statistics and Data Analysis, accepted, 2021.
"""
if mcmcopts is None:
mcmcopts = {"iter":1*(10**4), "nchains":4 , "vpriorvar":1,
"burnin":np.minimum(5*(10**3),2*(10**4)//2),
"alpha0":0.1, "beta0":0.1, "alpha":1, "beta":1,
"h":0.01, "L":4, "f1propvar":0.0001, "f2propvar":0.0001,
"L1propvar":0.3, "L2propvar":0.3, "npoints":200, "thin":1,
"sampfreq":1, "initcoef":np.repeat(0,20), "nbasis":10,
"basis":'fourier', "extrainfo":True}
if y1i.shape[0] != y2i.shape[0]:
raise Exception('Length of f1 and f2 must be equal')
if y1i.shape[0] != time.shape[0]:
raise Exception('Length of f1 and time must be equal')
if np.mod(mcmcopts["initcoef"].shape[0], 2) != 0:
raise Exception('Length of mcmcopts.initcoef must be even')
if np.mod(mcmcopts["nbasis"], 2) != 0:
raise Exception('Length of mcmcopts.nbasis must be even')
# set up random start points for more than 1 chain
random_starts = np.zeros((mcmcopts["initcoef"].shape[0], mcmcopts["nchains"]))
if mcmcopts["nchains"] > 1:
for i in range(0, mcmcopts["nchains"]):
randcoef = -1 + (2)*rand(mcmcopts["initcoef"].shape[0])
random_starts[:, i] = randcoef
isparallel = True
if mcmcopts["nchains"] == 1:
isparallel = False
if isparallel:
mcmcopts_p = []
for i in range(0, mcmcopts["nchains"]):
mcmcopts["initcoef"] = random_starts[:, i]
mcmcopts_p.append(mcmcopts)
# run chains
if isparallel:
chains = Parallel(n_jobs=-1)(delayed(run_mcmc)(y1i, y2i, time,
mcmcopts_p[n]) for n in range(mcmcopts["nchains"]))
else:
chains = []
chains1 = run_mcmc(y1i, y2i, time, mcmcopts)
chains.append(chains1)
# combine outputs
Nsamples = chains[0]['f1'].shape[0]
M = chains[0]['f1'].shape[1]
f1 = np.zeros((Nsamples*mcmcopts["nchains"], M))
f2 = np.zeros((Nsamples*mcmcopts["nchains"], M))
gamma = np.zeros((M, mcmcopts["nchains"]))
v_coef = np.zeros((Nsamples*mcmcopts["nchains"], chains[0]['v_coef'].shape[1]))
psi = np.zeros((M, Nsamples*mcmcopts["nchains"]))
sigma = np.zeros(Nsamples*mcmcopts["nchains"])
sigma1 = np.zeros(Nsamples*mcmcopts["nchains"])
sigma2 = np.zeros(Nsamples*mcmcopts["nchains"])
s1 = np.zeros(Nsamples*mcmcopts["nchains"])
s2 = np.zeros(Nsamples*mcmcopts["nchains"])
L1 = np.zeros(Nsamples*mcmcopts["nchains"])
L2 = np.zeros(Nsamples*mcmcopts["nchains"])
f2_warped_mu = np.zeros((M, mcmcopts["nchains"]))
if mcmcopts["extrainfo"]:
Nsamplesa = chains[0]['theta_accept'].shape[0]
theta_accept = np.zeros(Nsamplesa*mcmcopts["nchains"])
f1_accept = np.zeros(Nsamplesa*mcmcopts["nchains"])
f2_accept = np.zeros(Nsamplesa*mcmcopts["nchains"])
L1_accept = np.zeros(Nsamplesa*mcmcopts["nchains"])
L2_accept = np.zeros(Nsamplesa*mcmcopts["nchains"])
gamma_mat = np.zeros((M,Nsamplesa*mcmcopts["nchains"]))
SSE = np.zeros((Nsamplesa+1)*mcmcopts["nchains"])
logl = np.zeros((Nsamplesa+1)*mcmcopts["nchains"])
f2_warped = np.zeros((Nsamples*mcmcopts["nchains"], M))
phasedist = np.zeros(Nsamples*mcmcopts["nchains"])
ampdist = np.zeros(Nsamples*mcmcopts["nchains"])
for i in range(0, mcmcopts["nchains"]):
a = (i)*Nsamples
b = (i+1)*Nsamples
f1[a:b, :] = chains[i]['f1']
f2[a:b, :] = chains[i]['f2']
gamma[:, i] = chains[i]['gamma']
v_coef[a:b, :] = chains[i]['v_coef']
psi[:, i] = chains[i]['psi']
sigma[a:b] = chains[i]['sigma']
sigma1[a:b] = chains[i]['sigma1']
sigma2[a:b] = chains[i]['sigma2']
s1[a:b] = chains[i]['s1']
s2[a:b] = chains[i]['s2']
L1[a:b] = chains[i]['L1']
L2[a:b] = chains[i]['L2']
f2_warped_mu[:, i] = chains[i]['f2_warped_mu']
if mcmcopts["extrainfo"]:
a1 = (i)*Nsamplesa
b1 = (i+1)*Nsamplesa
theta_accept[a1:b1] = chains[i]['theta_accept']
f1_accept[a1:b1] = chains[i]['f1_accept']
f2_accept[a1:b1] = chains[i]['f2_accept']
L1_accept[a1:b1] = chains[i]['L1_accept']
L2_accept[a1:b1] = chains[i]['L2_accept']
gamma_mat[:, a:b] = chains[i]['gamma_mat']
a1 = (i)*(Nsamplesa)
b1 = (i+1)*Nsamplesa
SSE[a1:b1] = chains[i]['SSE']
logl[a1:b1] = chains[i]['logl']
f2_warped[a:b, :] = chains[i]['f2_warped']
phasedist[a:b] = chains[i]['phasedist']
ampdist[a:b] = chains[i]['ampdist']
# finding modes
if mcmcopts["nchains"] > 1:
Dx = np.zeros((mcmcopts["nchains"], mcmcopts["nchains"]))
time1 = np.linspace(0,1,gamma.shape[0])
binsize = np.diff(time1)
binsize = binsize.mean()
for i in range(0, mcmcopts["nchains"]):
for j in range(i+1,mcmcopts["nchains"]):
psi1 = np.sqrt(np.gradient(gamma[:, i], binsize))
psi2 = np.sqrt(np.gradient(gamma[:, j], binsize))
q1dotq2 = trapz(psi1*psi2, time1)
if q1dotq2 > 1:
q1dotq2 = 1
elif q1dotq2 < -1:
q1dotq2 = -1
Dx[i,j] = np.real(np.arccos(q1dotq2))
Dx = Dx + Dx.T
# cluster modes
y = squareform(Dx)
Z = linkage(y, method='complete')
cutoff = np.median(Dx)
T = fcluster(Z, cutoff, criterion='distance')
N = np.unique(T)
# find mean and confidence region of cluster
posterior_gamma_modes = np.zeros((M, N.shape[0]))
posterior_gamma_modes_cr = np.zeros((M, 2, N.shape[0]))
for i in range(1, N.shape[0]+1):
idx = np.where(T == i)[0]
tmp = np.zeros((M, Nsamples*idx.shape[0]))
for j in range(0, idx.shape[0]):
a = (j)*Nsamples
b = (j+1)*Nsamples
tmp[:, a:b] = chains[idx[j]]['gamma_mat']
mu, gam_mu, psi, vec = uf.SqrtMean(tmp)
posterior_gamma_modes[:, i-1] = gam_mu
posterior_gamma_modes_cr[:, :, i-1] = uf.statsFun(tmp)
# thinning
f1 = f1[0::mcmcopts["thin"], :]
f2 = f2[0::mcmcopts["thin"], :]
v_coef = v_coef[0::mcmcopts["thin"], :]
sigma = sigma[0::mcmcopts["thin"]]
sigma1 = sigma1[0::mcmcopts["thin"]]
sigma2 = sigma2[0::mcmcopts["thin"]]
s1 = s1[0::mcmcopts["thin"]]
s2 = s2[0::mcmcopts["thin"]]
L1 = L1[0::mcmcopts["thin"]]
L2 = L2[0::mcmcopts["thin"]]
if mcmcopts["extrainfo"]:
theta_accept = theta_accept[0::mcmcopts["thin"]]
f1_accept = f1_accept[0::mcmcopts["thin"]]
f2_accept = f2_accept[0::mcmcopts["thin"]]
L1_accept = L1_accept[0::mcmcopts["thin"]]
L2_accept = L2_accept[0::mcmcopts["thin"]]
gamma_mat = gamma_mat[:, 0::mcmcopts["thin"]]
SSE = SSE[0::mcmcopts["thin"]]
logl = logl[0::mcmcopts["thin"]]
f2_warped = f2_warped[0::mcmcopts["thin"], :]
phasedist = phasedist[0::mcmcopts["thin"]]
ampdist = ampdist[0::mcmcopts["thin"]]
if mcmcopts["extrainfo"]:
results_o = collections.namedtuple('align_bayes_HMC', ['f1', 'f2', 'gamma', 'v_coef', 'psi', 'sigma', 'sigma1', 'sigma2', 's1', 's2', 'L1', 'L2', 'f2_warped_mu', 'theta_accept', 'f1_accept', 'f2_accept', 'L1_accept', 'L2_accept', 'gamma_mat', 'SSE', 'logl', 'f2_warped', 'phasedist', 'ampdist'])
out = results_o(f1, f2, gamma, v_coef, psi, sigma, sigma1, sigma2, s1, s2, L1, L2, f2_warped_mu,
theta_accept, f1_accept, f2_accept, L1_accept, L2_accept, gamma_mat, SSE, logl,
f2_warped, phasedist, ampdist)
else:
results_o = collections.namedtuple('align_bayes_HMC', ['f1', 'f2', 'gamma', 'v_coef', 'psi', 'sigma', 'sigma1', 'sigma2', 's1', 's2', 'L1', 'L2', 'f2_warped_mu'])
out = results_o(f1, f2, gamma, v_coef, psi, sigma, sigma1, sigma2, s1, s2, L1, L2, f2_warped_mu)
return(out)
def run_mcmc(y1i, y2i, time, mcmcopts):
# Number of sig figs to report in gamma_mat
SIG_GAM = 13
iter = mcmcopts["iter"]
T = time.shape[0]
# normalize time to [0,1]
time = (time - time.min())/(time.max()-time.min())
# parameter settings
pw_sim_global_burnin = mcmcopts["burnin"]
valid_index = np.arange(pw_sim_global_burnin-1,iter)
ncoef = mcmcopts["initcoef"].shape[0]
nbasis = mcmcopts["nbasis"]
pw_sim_global_Mv = ncoef//2
numSimPoints = T
pw_sim_global_domain_par = np.linspace(0,1,numSimPoints)
d_basis = uf.basis_fourierd(pw_sim_global_domain_par, nbasis)
if mcmcopts["basis"] == 'fourier':
v_basis = uf.basis_fourier(pw_sim_global_domain_par, pw_sim_global_Mv, 1)
elif mcmcopts["basis"] == 'legendre':
v_basis = uf.basis_legendre(pw_sim_global_domain_par, pw_sim_global_Mv, 1)
else:
raise Exception('Incorrect Basis Specified')
sigma_ini = 1
v_priorvar = mcmcopts["vpriorvar"]
v_coef_ini = mcmcopts["initcoef"]
D = pdist(time.reshape((time.shape[0],1)))
Dmat = squareform(D)
C = v_priorvar / np.repeat(np.arange(1,pw_sim_global_Mv+1), 2)
cholC = cholesky(np.diag(C))
h = mcmcopts["h"]
L = mcmcopts["L"]
def propose_v_coef(v_coef_curr):
v_coef_new = normal(v_coef_curr, C.T)
return v_coef_new
# f1, f2 prior and proposal parameters
sigma1_ini = 0.01
sigma2_ini = 0.01
f1_propvar = mcmcopts["f1propvar"]
f2_propvar = mcmcopts["f2propvar"]
y1itmp = y1i[0::mcmcopts["sampfreq"]]
timetmp = time[0::mcmcopts["sampfreq"]]
kernel1 = GPy.kern.RBF(input_dim=1, variance=y1itmp.std()/np.sqrt(2), lengthscale=np.mean(timetmp.std()))
y2itmp = y2i[0::mcmcopts["sampfreq"]]
kernel2 = GPy.kern.RBF(input_dim=1, variance=y2itmp.std()/np.sqrt(2), lengthscale=np.mean(timetmp.std()))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 14 19:50:56 2020
@author: hiroyasu
"""
import cvxpy as cp
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import control
import SCPmulti as scp
import pickle
import TrainRNN as trnn
import torch
import pandas as pd
DT = scp.DT
TSPAN = scp.TSPAN
M = scp.M
II = scp.II
L = scp.L
bb = scp.bb
FMIN = scp.FMIN
FMAX = scp.FMAX
RungeNum = scp.RungeNum
AA = scp.AA
Robs = scp.Robs
Rsafe = scp.Rsafe
XOBSs = scp.XOBSs
XXd0 = np.load('data/params/desired_n/Xhis.npy')
"""
Author: <NAME> (<EMAIL>, http://personales.upv.es/jon)
Version: 1.0
Date: September 2021
Universitat Politecnica de Valencia
Technical University of Valencia TU.VLC
Computing the confusion matrix for a K-Means-based naive classifier
"""
import os
import sys
import numpy
import pickle
from machine_learning import KMeans
try:
from pyspark import SparkContext
except:
pass
if __name__ == "__main__":
"""
Usage: spark-submit --master local[4] python/kmeans_uc13_compute_confusion_matrix_spark.py \
--dataset data/uc13.csv \
--codebook models/kmeans_model-uc13-1000.pkl 2>/dev/null
"""
verbose = 0
dataset_filename = 'data/uc13.csv'
codebook_filename = 'models/kmeans_model-uc13-200.pkl'
spark_context = None
num_partitions = 40
batch_size = 100
for i in range(len(sys.argv)):
if sys.argv[i] == "--dataset":
dataset_filename = sys.argv[i + 1]
elif sys.argv[i] == "--verbosity":
verbose = int(sys.argv[i + 1])
elif sys.argv[i] == "--num-partitions":
num_partitions = int(sys.argv[i + 1])
elif sys.argv[i] == "--batch-size":
batch_size = int(sys.argv[i + 1])
elif sys.argv[i] == "--codebook":
codebook_filename = sys.argv[i + 1]
spark_context = SparkContext(appName = "K-Means compute confusion matrix")
with open(codebook_filename, 'rb') as f:
centers = pickle.load(f)
f.close()
kmeans = KMeans()
kmeans.n_clusters = len(centers)
kmeans.cluster_centers_ = numpy.array(centers)
# -*- coding: utf-8 -*-
# LICENCE
from __future__ import absolute_import, division, print_function, unicode_literals
import six # NOQA
from six.moves import zip
import numpy as np
from vtool_ibeis import histogram as htool
from vtool_ibeis import keypoint as ktool
from vtool_ibeis import linalg as ltool
from vtool_ibeis import image as gtool
from vtool_ibeis import trig
import utool as ut
import ubelt as ub
from .util_math import TAU
try:
import cv2
except ImportError as ex:
print('ERROR: import cv2 is failing!')
cv2 = ut.DynStruct()
cv2.INTER_LANCZOS4 = None
cv2.INTER_CUBIC = None
cv2.BORDER_CONSTANT = None
cv2.BORDER_REPLICATE = None
def patch_gradient(patch, ksize=1, gaussian_weighted=False):
patch_ = np.array(patch, dtype=np.float64)
gradx = cv2.Sobel(patch_, cv2.CV_64F, 1, 0, ksize=ksize)
grady = cv2.Sobel(patch_, cv2.CV_64F, 0, 1, ksize=ksize)
if gaussian_weighted:
gausspatch = gaussian_patch(shape=gradx.shape)
gausspatch /= gausspatch.max()
gradx *= gausspatch
grady *= gausspatch
return gradx, grady
def patch_mag(gradx, grady):
return np.sqrt((gradx ** 2) + (grady ** 2))
def patch_ori(gradx, grady):
""" returns patch orientation relative to the x-axis """
gori = trig.atan2(grady, gradx)
return gori
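# Usage sketch (illustrative, combining the three helpers above with get_test_patch defined below):
#   patch = get_test_patch('star')
#   gradx, grady = patch_gradient(patch, ksize=1, gaussian_weighted=False)
#   gmag = patch_mag(gradx, grady)
#   gori = patch_ori(gradx, grady)  # radians, measured from the x-axis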
def get_test_patch(key='star', jitter=False):
r"""
Args:
key (str):
jitter (bool):
Returns:
ndarray: patch
CommandLine:
python -m vtool_ibeis.patch --test-get_test_patch --show
Example:
>>> # DISABLE_DOCTEST
>>> from vtool_ibeis.patch import * # NOQA
>>> import plottool_ibeis as pt
>>> key = 'star2'
>>> jitter = False
>>> patch = get_test_patch(key, jitter)
>>> pt.imshow(255 * patch)
>>> pt.show_if_requested()
"""
func = {
'star2': get_star2_patch,
'cross': get_cross_patch,
'star': get_star_patch,
'stripe': get_stripe_patch,
}[key]
patch = func(jitter)
return patch
def make_test_image_keypoints(imgBGR, scale=1.0, skew=0, theta=0, shift=(0, 0)):
h, w = imgBGR.shape[0:2]
half_w, half_h = w / 2.0, h / 2.0
x, y = (half_w - .5) + (w * shift[0]), (half_h - .5) + (h * shift[1])
a = (half_w) * scale
c = skew
d = (half_h) * scale
theta = theta
kpts = np.array([[x, y, a, c, d, theta]], np.float32)
return kpts
def get_no_symbol(variant='symbol', size=(100, 100)):
r"""
Returns:
ndarray: errorimg
CommandLine:
python -m vtool_ibeis.patch --test-get_no_symbol --show
Example:
>>> # ENABLE_DOCTEST
>>> from vtool_ibeis.patch import * # NOQA
>>> errorimg = get_no_symbol()
>>> # xdoctest: +REQUIRES(--show)
>>> import plottool_ibeis as pt
>>> pt.imshow(errorimg)
>>> ut.show_if_requested()
"""
thickness = 2
shape = (size[1], size[0], 3)
errorimg = np.zeros(shape)
center = (size[0] // 2, size[1] // 2)
radius = min(center) - thickness
color_bgr = [0, 0, 255]
tau = 2 * np.pi
angle = 45 / 360 * tau
pt1 = (center[0] - int(np.sin(angle) * radius), center[1] - int(np.cos(angle) * radius))
pt2 = (center[0] + int(np.sin(angle) * radius), center[1] + int(np.cos(angle) * radius))
if variant == 'symbol':
cv2.circle(errorimg, center, radius, color_bgr, thickness)
cv2.line(errorimg, pt1, pt2, color_bgr, thickness)
else:
import vtool_ibeis as vt
fontFace = cv2.FONT_HERSHEY_PLAIN
org = (size[0] * .1, size[1] * .6)
fontkw = dict(bottomLeftOrigin=False, fontScale=2.5, fontFace=fontFace)
vt.draw_text(errorimg, 'NaN', org, thickness=2,
textcolor_rgb=color_bgr[::-1], **fontkw)
return errorimg
def get_star_patch(jitter=False):
""" test data patch """
_, O = .1, .8
patch = np.array([
[_, _, _, O, _, _, _],
[_, _, _, O, _, _, _],
[_, _, O, O, O, _, _],
[O, O, O, O, O, O, O],
[_, O, O, O, O, O, _],
[_, _, O, O, O, _, _],
[_, O, O, O, O, O, _],
[_, O, _, _, _, O, _],
[O, _, _, _, _, _, O]])
if jitter:
patch += np.random.rand(*patch.shape) * .1
return patch
def get_star2_patch(jitter=False):
""" test data patch """
_, i, O = .1, .8, .5
patch = np.array([
[_, _, _, _, _, _, _, O, O, _, _, _, _, _, _, _],
[_, _, _, _, _, _, O, i, i, O, _, _, _, _, _, _],
[_, _, _, _, _, _, O, i, i, O, _, _, _, _, _, _],
[_, _, _, _, _, O, i, i, i, i, O, _, _, _, _, _],
[O, O, O, O, O, O, i, i, i, i, O, O, O, O, O, O],
[O, i, i, i, i, i, i, i, i, i, i, i, i, i, i, O],
[_, O, i, i, i, i, O, i, i, O, i, i, i, i, O, _],
[_, _, O, i, i, i, O, i, i, O, i, i, i, O, _, _],
[_, _, _, O, i, i, O, i, i, O, i, i, O, _, _, _],
[_, _, _, O, i, i, i, i, i, i, i, i, O, _, _, _],
[_, _, O, i, i, i, i, i, i, i, i, i, i, O, _, _],
[_, _, O, i, i, i, i, i, i, i, i, i, i, O, _, _],
[_, O, i, i, i, i, i, O, O, i, i, i, i, i, O, _],
[_, O, i, i, i, O, O, _, _, O, O, i, i, i, O, _],
[O, i, i, O, O, _, _, _, _, _, _, O, O, i, i, O],
[O, O, O, _, _, _, _, _, _, _, _, _, _, O, O, O]])
if jitter:
patch += np.random.rand(*patch.shape) * .1
return patch
def get_cross_patch(jitter=False):
""" test data patch """
_, O = .1, .8
patch = np.array([
[_, _, O, O, O, _, _],
[_, _, O, O, O, _, _],
[_, _, O, O, O, _, _],
[O, O, O, O, O, O, O],
[O, O, O, O, O, O, O],
[_, _, O, O, O, _, _],
[_, _, O, O, O, _, _],
[_, _, O, O, O, _, _],
[_, _, O, O, O, _, _]])
if jitter:
patch += np.random.rand(*patch.shape) * .1
return patch
def get_stripe_patch(jitter=False):
""" test data patch """
_, O = .1, .8
patch = np.array([
[O, O, O, _, _, _, _],
[O, O, O, _, _, _, _],
[O, O, O, _, _, _, _],
[O, O, O, _, _, _, _],
[O, O, O, _, _, _, _],
[O, O, O, _, _, _, _],
[O, O, O, _, _, _, _],
[O, O, O, _, _, _, _],
[O, O, O, _, _, _, _]])
if jitter:
patch += np.random.rand(*patch.shape) * .1
return patch
def test_show_gaussian_patches2(shape=(19, 19)):
r"""
CommandLine:
python -m vtool_ibeis.patch --test-test_show_gaussian_patches2 --show
python -m vtool_ibeis.patch --test-test_show_gaussian_patches2 --show --shape=7,7
python -m vtool_ibeis.patch --test-test_show_gaussian_patches2 --show --shape=19,19
python -m vtool_ibeis.patch --test-test_show_gaussian_patches2 --show --shape=41,41
python -m vtool_ibeis.patch --test-test_show_gaussian_patches2 --show --shape=41,7
References:
http://matplotlib.org/examples/mplot3d/surface3d_demo.html
Example:
>>> # DISABLE_DOCTEST
>>> from vtool_ibeis.patch import * # NOQA
>>> from mpl_toolkits.mplot3d import Axes3D # NOQA
>>> import plottool_ibeis as pt
>>> shape = ut.get_argval(('--shape',), type_=list, default=[19, 19])
>>> test_show_gaussian_patches2(shape=shape)
>>> pt.show_if_requested()
"""
from mpl_toolkits.mplot3d import Axes3D # NOQA
import plottool_ibeis as pt
import numpy as np
import matplotlib as mpl
import vtool_ibeis as vt
shape = tuple(map(int, shape))
print('shape = %r' % (shape,))
#shape = (27, 27)
#shape = (7, 7)
#shape = (41, 41)
#shape = (5, 5)
#shape = (3, 3)
sigma_percent_list = [.1, .3, .5, .6, .7, .8, .9, .95, 1.0]
#np.linspace(.1, 3, 9)
ybasis = np.arange(shape[0])
xbasis = np.arange(shape[1])
xgrid, ygrid = np.meshgrid(xbasis, ybasis)
fnum = pt.next_fnum()
for sigma_percent in pt.param_plot_iterator(sigma_percent_list, fnum=fnum, projection='3d'):
radius1 = shape[0]
radius2 = shape[1]
sigma1 = radius1 * sigma_percent
sigma2 = radius2 * sigma_percent
sigma = [sigma1, sigma2]
gausspatch = vt.gaussian_patch(shape, sigma=sigma)
#print(gausspatch)
#pt.imshow(gausspatch * 255)
pt.plot_surface3d(xgrid, ygrid, gausspatch, rstride=1, cstride=1,
cmap=mpl.cm.coolwarm, title='sigma_percent=%.3f' % (sigma_percent,))
pt.update()
pt.set_figtitle('2d gaussian kernels')
def show_gaussian_patch(shape, sigma1, sigma2):
from mpl_toolkits.mplot3d import Axes3D # NOQA
import matplotlib as mpl
import plottool_ibeis as pt
import vtool_ibeis as vt
ybasis = np.arange(shape[0])
xbasis = np.arange(shape[1])
xgrid, ygrid = np.meshgrid(xbasis, ybasis)
sigma = [sigma1, sigma2]
gausspatch = vt.gaussian_patch(shape, sigma=sigma)
#print(gausspatch)
#pt.imshow(gausspatch * 255)
title = 'ksize=%r, sigma=%r' % (shape, (sigma1, sigma2),)
pt.plot_surface3d(xgrid, ygrid, gausspatch, rstride=1, cstride=1,
cmap=mpl.cm.coolwarm, title=title)
def inverted_sift_patch(sift, dim=32):
"""
Idea for inverted sift visualization
CommandLine:
python -m vtool_ibeis.patch inverted_sift_patch --show --name=star
python -m vtool_ibeis.patch inverted_sift_patch --show --name=star2
python -m vtool_ibeis.patch inverted_sift_patch --show --name=cross
python -m vtool_ibeis.patch inverted_sift_patch --show --name=stripe
Example:
>>> # DISABLE_DOCTEST
>>> from vtool_ibeis.patch import * # NOQA
>>> import vtool_ibeis as vt
>>> patch = vt.get_test_patch(ut.get_argval('--name', default='star'))
>>> sift = vt.extract_feature_from_patch(patch)
>>> siftimg = inverted_sift_patch(sift)
>>> # Need to do some image blending
>>> # xdoctest: +REQUIRES(--show)
>>> import plottool_ibeis as pt
>>> pt.figure(fnum=1, pnum=(1, 2, 1))
>>> pt.mpl_sift.draw_sift_on_patch(siftimg, sift)
>>> pt.figure(fnum=1, pnum=(1, 2, 2))
>>> patch2 = patch
>>> patch2 = vt.rectify_to_uint8(patch2)
>>> patch2 = vt.rectify_to_square(patch2)
>>> pt.mpl_sift.draw_sift_on_patch(patch2, sift)
>>> ut.show_if_requested()
"""
import vtool_ibeis as vt
# dim = 21
pad = dim // 2 + (dim % 2)
# pad = 0
blocks = []
for siftmags in ut.ichunks(sift, 8):
thetas = np.linspace(0, TAU, 8, endpoint=False)
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold
import pdb
import sys
sys.path.append('../data')
from pytorch_data_operations import buildLakeDataForRNN_manylakes_finetune2, parseMatricesFromSeqs
import torch
import torch.nn as nn
import torch.utils.data
from torch.utils.data import Dataset, DataLoader
from torch.nn.init import xavier_normal_
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.svm import SVR
from sklearn.model_selection import cross_val_score
from sklearn.neural_network import MLPRegressor
from scipy.stats import spearmanr
from joblib import dump, load
import re
import os
metadata = pd.read_feather("../../metadata/lake_metadata_2700plus.feather")
sites = pd.read_csv('../../metadata/sites_moreThan10ProfilesWithGLM_Mar2020Update.csv')
ids = pd.read_csv('../../metadata/pball_site_ids.csv', header=None)
ids = ids[0].values
glm_all_f = pd.read_csv("../../results/glm_transfer/RMSE_transfer_glm_pball.csv")
train_df = pd.read_feather("../../results/transfer_learning/glm/train_rmses_pball.feather")
train_lakes = [re.search('nhdhr_(.*)', x).group(1) for x in np.unique(glm_all_f['target_id'].values)]
n_lakes = len(train_lakes)
all_sites = metadata['site_id'].values
test_lakes = np.array(all_sites[~np.isin(all_sites, train_lakes)])
metadata.set_index('site_id', inplace=True)
output_to_file = True
biases = []
verbose=False
# print(train_lakes.shape[0], " training lakes")
#temp
# test_lakes = np.array(['{B5AAC6E6-9ED3-413C-B1D8-9094367B2EEC}'])
csv = []
csv.append(",".join(["source_id","pgdl_rmse","glm_rmse"]))
test_lakes = np.array(train_lakes)
# test_lakes = train_lakes
rmse_per_lake = np.empty(test_lakes.shape[0])
glm_rmse_per_lake = np.empty(test_lakes.shape[0])
srcorr_per_lake = np.empty(test_lakes.shape[0])
meta_rmse_per_lake = np.empty(test_lakes.shape[0])
med_meta_rmse_per_lake = np.empty(test_lakes.shape[0])
rmse_per_lake[:] = np.nan
glm_rmse_per_lake[:] = np.nan
meta_rmse_per_lake[:] = np.nan
csv = []
csv.append('target_id,rmse,rmse_pred,rmse_pred_lower,rmse_pred_upper,rmse_pred_med,spearman,glm_rmse,site_id')
err_per_source = np.empty((9,len(train_lakes)))
# test_lakes = np.array(['120020398'])
for targ_ct, target_id in enumerate(train_lakes): #for each target lake
nid = target_id
# if nid == '120018008' or nid == '120020307' or nid == '120020636' or nid == '32671150' or nid =='58125241'or nid=='120020800' or nid=='91598525':
# continue
print("target lake ",targ_ct,"/",len(train_lakes),": ", target_id)
top_ids = [target_id]
#define target test data to use
data_dir_target = "../../data/processed/lake_data/"+target_id+"/"
#target agnostic model and data params
use_gpu = True
n_features = 8
# n_hidden = 20
seq_length = 350
win_shift = 175
begin_loss_ind = 0
(_, _, tst_data_target, tst_dates_target, unique_tst_dates_target, all_data_target, all_phys_data_target, all_dates_target,
_) = buildLakeDataForRNN_manylakes_finetune2(target_id, data_dir_target, seq_length, n_features,
win_shift = win_shift, begin_loss_ind = begin_loss_ind,
outputFullTestMatrix=True, allTestSeq=True)
#useful values, LSTM params
batch_size = all_data_target.size()[0]
u_depths_target = np.unique(all_data_target[:,0,0])
n_depths = torch.unique(all_data_target[:,:,0]).size()[0]
n_test_dates_target = unique_tst_dates_target.shape[0]
#define LSTM model
class LSTM(nn.Module):
def __init__(self, input_size, hidden_size, batch_size):
super(LSTM, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.batch_size = batch_size
self.lstm = nn.LSTM(input_size = n_features, hidden_size=hidden_size, batch_first=True)
self.out = nn.Linear(hidden_size, 1)
self.hidden = self.init_hidden()
def init_hidden(self, batch_size=0):
# initialize both hidden layers
if batch_size == 0:
batch_size = self.batch_size
ret = (xavier_normal_(torch.empty(1, batch_size, self.hidden_size)),
xavier_normal_(torch.empty(1, batch_size, self.hidden_size)))
if use_gpu:
item0 = ret[0].cuda(non_blocking=True)
item1 = ret[1].cuda(non_blocking=True)
ret = (item0,item1)
return ret
def forward(self, x, hidden): #forward network propagation
self.lstm.flatten_parameters()
x = x.float()
x, hidden = self.lstm(x, self.hidden)
self.hidden = hidden
x = self.out(x)
return x, hidden
#output matrix
n_lakes = len(top_ids)
output_mats = np.empty((n_lakes, n_depths, n_test_dates_target))
ind_rmses = np.empty((n_lakes))
ind_rmses[:] = np.nan
label_mats = np.empty((n_depths, n_test_dates_target))
output_mats[:] = np.nan
label_mats[:] = np.nan
for i, source_id in enumerate(top_ids):
#for each top id
#load source model
load_path = "../../models/single_lake_models/"+source_id+"/PGRNN_source_model_0.7"
n_hidden = torch.load(load_path)['state_dict']['out.weight'].shape[1]
lstm_net = LSTM(n_features, n_hidden, batch_size)
if use_gpu:
lstm_net = lstm_net.cuda(0)
pretrain_dict = torch.load(load_path)['state_dict']
model_dict = lstm_net.state_dict()
pretrain_dict = {key: v for key, v in pretrain_dict.items() if key in model_dict}
model_dict.update(pretrain_dict)
lstm_net.load_state_dict(pretrain_dict)
#things needed to predict test data
mse_criterion = nn.MSELoss()
testloader = torch.utils.data.DataLoader(tst_data_target, batch_size=tst_data_target.size()[0], shuffle=False, pin_memory=True)
lstm_net.eval()
with torch.no_grad():
avg_mse = 0
ct = 0
for m, data in enumerate(testloader, 0):
#now for mendota data
#this loop is dated, there is now only one item in testloader
#parse data into inputs and targets
inputs = data[:,:,:n_features].float()
targets = data[:,:,-1].float()
targets = targets[:, begin_loss_ind:]
tmp_dates = tst_dates_target[:, begin_loss_ind:]
depths = inputs[:,:,0]
if use_gpu:
inputs = inputs.cuda()
targets = targets.cuda()
#run model
h_state = None
lstm_net.hidden = lstm_net.init_hidden(batch_size=inputs.size()[0])
pred, h_state = lstm_net(inputs, h_state)
pred = pred.view(pred.size()[0],-1)
pred = pred[:, begin_loss_ind:]
#calculate error
targets = targets.cpu()
loss_indices = np.where(~np.isnan(targets))
if use_gpu:
targets = targets.cuda()
inputs = inputs[:, begin_loss_ind:, :]
depths = depths[:, begin_loss_ind:]
mse = mse_criterion(pred[loss_indices], targets[loss_indices])
# print("test loss = ",mse)
avg_mse += mse
if mse > 0: #obsolete i think
ct += 1
avg_mse = avg_mse / ct
#save model
(outputm_npy, labelm_npy) = parseMatricesFromSeqs(pred.cpu().numpy(), targets.cpu().numpy(), depths, tmp_dates, n_depths,
n_test_dates_target, u_depths_target,
unique_tst_dates_target)
#store output
output_mats[i,:,:] = outputm_npy
if i == 0:
#store label
label_mats = labelm_npy
loss_output = outputm_npy[~np.isnan(labelm_npy)]
loss_label = labelm_npy[~np.isnan(labelm_npy)]
mat_rmse = np.sqrt(((loss_output - loss_label) ** 2).mean())
print(source_id+" rmse=", mat_rmse)
err_per_source[i,targ_ct] = mat_rmse
outputm_npy = np.transpose(outputm_npy)
output_df = pd.DataFrame(data=outputm_npy, columns=[str(float(x/2)) for x in range(outputm_npy.shape[1])], index=[str(x)[:10] for x in unique_tst_dates_target]).reset_index()
output_df.rename(columns={'index': 'depth'})
assert np.isfinite(np.array(output_df.values[:,1:],dtype=np.float32)).all(), "nan output"
output_df.to_feather('./source_pgdl_outputs/nhdhr_'+target_id+'.feather')
#save model
total_output_npy = np.average(output_mats, axis=0)
loss_output = total_output_npy[~np.isnan(label_mats)]
loss_label = label_mats[~np.isnan(label_mats)]
import numpy as np
import pytest
import apexpy
import tempfile
import os
import h5py
from ttools import create_dataset, config, io, utils
map_periods = [np.timedelta64(10, 'm'), np.timedelta64(30, 'm'), np.timedelta64(1, 'h'), np.timedelta64(2, 'h')]
@pytest.fixture
def times():
yield np.datetime64('2010-01-01T00:00:00') + np.arange(100) * np.timedelta64(5, 'm')
@pytest.mark.parametrize('map_period', map_periods)
def test_assemble_args(times, map_period):
mlat = np.arange(10)
mlt = np.arange(10)
ssmlon = np.random.rand(times.shape[0])
mlt, mlat = np.meshgrid(mlt, mlat)
import argparse
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import time
from scipy import stats
from sklearn.metrics import r2_score
import math
# Force using CPU globally by hiding GPU(s)
tf.config.set_visible_devices([], 'GPU')
# import edl
import evidential_deep_learning as edl
import data_loader
import trainers
import models
from models.toy.h_params import h_params
import itertools
tf.config.threading.set_intra_op_parallelism_threads(1)
import random
data_name = 'flight_delay'
original_data_path = '../flight_delay_data/'
results_path = './Results_DER/'+data_name + '_DER_results.txt'
save_loss_history = False
save_loss_history_path = './Results_DER/loss_history/'
plot_loss_history = False
plot_loss_history_path = './Results_DER/loss_curves/'
parser = argparse.ArgumentParser()
parser.add_argument("--num-trials", default=1, type=int,
help="Number of trials to repreat training for \
statistically significant results.")
parser.add_argument("--num-epochs", default=100, type=int)
parser.add_argument('--datasets', nargs='+', default=["flight_delay"],
choices=['flight_delay'])
dataset = data_name
# learning_rate = h_params[dataset]["learning_rate"]
# batch_size = h_params[dataset]["batch_size"]
learning_rate = 1e-4
batch_size = 512
neurons = 100
### New flight delay data loader for customized train/test data same with PI3NN method
xTrain, yTrain, yTrain_scale, test_data_list = data_loader.load_flight_delays('../flight_delay_data/')
# '''choose the train/test dataset '''
x_train = xTrain
y_train = yTrain
y_scale = yTrain_scale
test_idx = 0 # [0, 1, 2, 3] for test 1,2,3,4
x_test = test_data_list[test_idx][0]
y_test = test_data_list[test_idx][1]
seed = 12345
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
args = parser.parse_args()
args.datasets[0] = data_name
training_schemes = [trainers.Evidential]
datasets = args.datasets
print('--- Printing datasets:')
print(datasets)
num_trials = args.num_trials
print('num_trials:{}'.format(num_trials))
# num_trials = 3
num_epochs = args.num_epochs
dev = "/cpu:0" # for small datasets/models cpu is faster than gpu
"""" ================================================"""
RMSE = np.zeros((len(datasets), len(training_schemes), num_trials))
NLL = np.zeros((len(datasets), len(training_schemes), num_trials))
PICP_arr = np.zeros(num_trials)
MPIW_arr = np.zeros(num_trials)
R2_arr = np.zeros(num_trials)
for di, dataset in enumerate(datasets):
# print(di)
# print(dataset)
for ti, trainer_obj in enumerate(training_schemes):
for n in range(num_trials):
print('*********************************************')
print('--- data: {}, trial: {}'.format(data_name, n+1))
print('*********************************************')
# batch_size = h_params[dataset]["batch_size"]
num_iterations = num_epochs * x_train.shape[0]//batch_size
print('num_epochs: {}, num_x_data: {}, batch_size: {}, total iters {} = {} * {} // {}'.format(num_epochs, x_train.shape[0], batch_size, num_iterations, num_epochs, x_train.shape[0], batch_size))
done = False
while not done:
with tf.device(dev):
model_generator = models.get_correct_model(dataset="toy", trainer=trainer_obj)
model, opts = model_generator.create(input_shape=x_train.shape[1:], num_neurons=neurons, tf_seed=seed)
trainer = trainer_obj(model, opts, dataset, learning_rate=learning_rate)
model, rmse, nll, loss = trainer.train(x_train, y_train, x_test, y_test, y_scale, batch_size=batch_size, iters=num_iterations,
verbose=True, data_name=data_name, rnd_seed=seed, trial_num=n,
bool_plot_loss=False, bool_save_loss=True,
save_loss_path=save_loss_history_path,
plot_loss_path=plot_loss_history_path)
''' Evaluate the PICP and MPIW for each trial '''
### taken from the 'plot_ng' function from the original evidential regression code
x_test_input_tf = tf.convert_to_tensor(x_test, tf.float32)
outputs = model(x_test_input_tf)
mu, v, alpha, beta = tf.split(outputs, 4, axis=1)
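# mu, v, alpha, beta parameterize the Normal-Inverse-Gamma output of the evidential head;
# sqrt(beta / (v * (alpha - 1))) below is the epistemic standard deviation of the mean, and
# mu +/- 1.96 * std gives an approximate 95% prediction interval.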
epistemic_var = np.sqrt(beta / (v * (alpha - 1)))
epistemic_var = np.minimum(epistemic_var, 1e3)
y_pred_U = mu.numpy() + epistemic_var * 1.96
y_pred_L = mu.numpy() - epistemic_var * 1.96
# print('y_pred_U: {}'.format(y_pred_U))
# print('y_pred_L: {}'.format(y_pred_L))
''' Do same thing for training data in order to do OOD analysis '''
x_train_input_tf = tf.convert_to_tensor(x_train, tf.float32)
outputs_train = model(x_train_input_tf)
mu_train, v_train, alpha_train, beta_train = tf.split(outputs_train, 4, axis=1)
epistemic_var_train = np.sqrt(beta_train / (v_train * (alpha_train - 1)))
epistemic_var_train = np.minimum(epistemic_var_train, 1e3)
y_pred_U_train = mu_train.numpy() + epistemic_var_train * 1.96
y_pred_L_train = mu_train.numpy() - epistemic_var_train * 1.96
if np.isnan(y_pred_U).any() or np.isnan(y_pred_L).any():
PICP = math.nan
MPIW = math.nan
R2 = math.nan
rmse = math.nan
nll = math.nan
print('--- the y_pred_U/L contains NaN(s) in current trial')
else:
''' Calculate the confidence scores (y-axis) range from 0-1'''
y_U_cap_train = y_pred_U_train.flatten() > y_train
y_L_cap_train = y_pred_L_train.flatten() < y_train
MPIW_array_train = y_pred_U_train.flatten() - y_pred_L_train.flatten()
MPIW_train = np.mean(MPIW_array_train)
#### for test (evaluate each y_U_cap - y_L_cap in the pre-calculated MPIW_train single value
# for the confidence score)
print(y_pred_U.shape)
print(y_pred_L.shape)
print(y_test.reshape(-1).shape)
y_pred_U = y_pred_U.reshape(-1)
y_pred_L = y_pred_L.reshape(-1)
y_U_cap = y_pred_U > y_test
y_L_cap = y_pred_L < y_test
# print('y_U_cap: {}'.format(y_U_cap))
# print('y_L_cap: {}'.format(y_L_cap))
# print('y_L_cap: {}'.format(y_L_cap))
y_all_cap = y_U_cap * y_L_cap
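# PICP: fraction of test targets captured inside [y_pred_L, y_pred_U];
# MPIW: mean width of those prediction intervals.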
PICP = np.sum(y_all_cap) / y_L_cap.shape[0]
MPIW_array = y_pred_U - y_pred_L
MPIW = np.mean(MPIW_array)
confidence_arr_test = [min(MPIW_train / test_width, 1.0) for test_width in MPIW_array]
confidence_arr_train = [min(MPIW_train / train_width, 1.0) for train_width in MPIW_array_train]
print('----------- OOD analysis --- confidence scores ----------------')
print('--- Train conf_scores MEAN: {}, STD: {}'.format(np.mean(confidence_arr_train), np.std(confidence_arr_train)))
print('--- Test: {} rank: {} conf_scores MEAN: {}, STD: {}'.format(test_idx+1, test_idx+1, np.mean(confidence_arr_test), np.std(confidence_arr_test)))
''' Calculate the L2 distance to the mean of training data (x-axis), range from 0-30'''
dist_arr_train = np.sqrt(np.sum(x_train ** 2.0, axis=1))
dist_arr_test = np.sqrt(np.sum(x_test ** 2.0, axis=1))
# print('dist_arr_train shape: {}'.format(dist_arr_train.shape))
# print('confidence arr train len: {}'.format(len(confidence_arr_train)))
# print('dist_arr_test shape: {}'.format(dist_arr_test.shape))
# print('confidence arr test len: {}'.format(len(confidence_arr_test)))
''' Save to file and plot the results '''
confidence_arr_train = np.array(confidence_arr_train)
confidence_arr_test = np.array(confidence_arr_test)
DER_OOD_train_np = np.hstack(
(dist_arr_train.reshape(-1, 1), confidence_arr_train.reshape(-1, 1)))
DER_OOD_test_np = np.hstack(
(dist_arr_test.reshape(-1, 1), confidence_arr_test.reshape(-1, 1)))
np.savetxt('DER_OOD_flight_delay_'+ str(test_idx+1) +'_train_np.txt', DER_OOD_train_np, delimiter=',')
np.savetxt('DER_OOD_flight_delay_'+ str(test_idx+1) +'_test_np.txt', DER_OOD_test_np, delimiter=',')
# plt.plot(dist_arr_train, confidence_arr_train, 'r.', label='Training data (in distribution)')
# plt.plot(dist_arr_test, confidence_arr_test, 'b.',label='testing data (out of distribution')
# plt.xlabel('L2 distance to the mean of training data $\{x_i\}_{i=1}^N$')
# plt.ylabel('The Confidence Score')
# plt.legend(loc='lower left')
# plt.title('DER flight delay test case '+ str(test_idx+1))
# # plt.ylim(0, 1.2)
# plt.savefig('DER_OOD_flight_delay_'+str(test_idx+1)+'.png')
# # plt.show()
R2 = r2_score(y_test, mu.numpy())
print('PICP: {}, MPIW: {}, R2: {}'.format(PICP, MPIW, R2))
del model
tf.keras.backend.clear_session()
done = False if np.isinf(nll) or np.isnan(nll) else True
### new added done criteria
if np.isnan(loss):
done = True
print("saving {} {}".format(rmse, nll))
RMSE[di, ti, n] = rmse
NLL[di, ti, n] = nll
PICP_arr[n] = PICP
MPIW_arr[n] = MPIW
R2_arr[n] = R2
print('PICP_arr: {}'.format(PICP_arr))
print('MPIW_arr: {}'.format(MPIW_arr))
print('R2_arr: {}'.format(R2_arr))
PICP_mean = np.nanmean(PICP_arr)
MPIW_mean = np.nanmean(MPIW_arr)
RMSE_mean = np.nanmean(RMSE)
NLL_mean = np.nanmean(NLL)
R2_mean = np.nanmean(R2_arr)
print('--- Mean PICP: {}'.format(PICP_mean))
print('--- Mean MPIW: {}'.format(MPIW_mean))
print('--- Mean RMSE: {}'.format(RMSE_mean))
print('--- Mean NLL: {}'.format(NLL_mean))
print('--- Mean R2: {}'.format(R2_mean))
RESULTS = np.hstack((RMSE, NLL))
import pandas as pd
import numpy as np
import time
t0 = time.time()
number_array = np.arange(1101,2000)
import collections
import numpy as np
import os
import time
from tqdm import tqdm
from apex import amp
import torch
import torch.nn.functional as F
from pycocotools.cocoeval import COCOeval
from simpleAICV.classification.common import ClassificationDataPrefetcher, AverageMeter, accuracy
from simpleAICV.detection.common import DetectionDataPrefetcher
from simpleAICV.segmentation.common import SegmentationDataPrefetcher
def validate_classification(val_loader, model, criterion, config):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
model_on_cuda = next(model.parameters()).is_cuda
for images, targets in tqdm(val_loader):
if model_on_cuda:
images, targets = images.cuda(), targets.cuda()
data_time.update(time.time() - end)
end = time.time()
outputs = model(images)
batch_time.update(time.time() - end)
loss = criterion(outputs, targets)
acc1, acc5 = accuracy(outputs, targets, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1.item(), images.size(0))
top5.update(acc5.item(), images.size(0))
end = time.time()
# per image data load time(ms) and inference time(ms)
per_image_load_time = data_time.avg / config.batch_size * 1000
per_image_inference_time = batch_time.avg / config.batch_size * 1000
return top1.avg, top5.avg, losses.avg, per_image_load_time, per_image_inference_time
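# Usage sketch (illustrative; a criterion such as nn.CrossEntropyLoss() and a config exposing
# batch_size are assumptions based on the code above):
#   acc1, acc5, loss, load_ms, infer_ms = validate_classification(val_loader, model, criterion, config)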
def train_classification(train_loader, model, criterion, optimizer, scheduler,
epoch, logger, config):
'''
train classification model for one epoch
'''
top1 = AverageMeter()
top5 = AverageMeter()
losses = AverageMeter()
# switch to train mode
model.train()
local_rank = torch.distributed.get_rank() if config.distributed else None
if config.distributed:
gpus_num = torch.cuda.device_count()
iters = len(train_loader.dataset) // (
config.batch_size * gpus_num) if config.distributed else len(
train_loader.dataset) // config.batch_size
else:
iters = len(train_loader.dataset) // config.batch_size
prefetcher = ClassificationDataPrefetcher(train_loader)
images, targets = prefetcher.next()
iter_index = 1
while images is not None:
images, targets = images.cuda(), targets.cuda()
outputs = model(images)
loss = criterion(outputs, targets)
loss = loss / config.accumulation_steps
if config.apex:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
if iter_index % config.accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
# measure accuracy and record loss
acc1, acc5 = accuracy(outputs, targets, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1.item(), images.size(0))
top5.update(acc5.item(), images.size(0))
images, targets = prefetcher.next()
if iter_index % config.print_interval == 0:
log_info = f'train: epoch {epoch:0>4d}, iter [{iter_index:0>5d}, {iters:0>5d}], lr: {scheduler.get_lr()[0]:.6f}, top1: {acc1.item():.2f}%, top5: {acc5.item():.2f}%, loss: {loss.item():.4f}'
logger.info(log_info) if (config.distributed and local_rank
== 0) or not config.distributed else None
iter_index += 1
scheduler.step()
return top1.avg, top5.avg, losses.avg
def validate_KD(val_loader, model, criterion):
top1 = AverageMeter()
top5 = AverageMeter()
total_losses = AverageMeter()
# switch to evaluate mode
model.eval()
with torch.no_grad():
model_on_cuda = next(model.parameters()).is_cuda
for images, targets in tqdm(val_loader):
if model_on_cuda:
images, targets = images.cuda(), targets.cuda()
tea_outputs, stu_outputs = model(images)
total_loss = 0
for loss_name in criterion.keys():
if 'KD' in loss_name:
temp_loss = criterion[loss_name](stu_outputs, tea_outputs)
else:
temp_loss = criterion[loss_name](stu_outputs, targets)
total_loss += temp_loss
acc1, acc5 = accuracy(stu_outputs, targets, topk=(1, 5))
total_losses.update(total_loss.item(), images.size(0))
top1.update(acc1.item(), images.size(0))
top5.update(acc5.item(), images.size(0))
return top1.avg, top5.avg, total_losses.avg
def train_KD(train_loader, model, criterion, optimizer, scheduler, epoch,
logger, config):
'''
train classification model for one epoch
'''
top1 = AverageMeter()
top5 = AverageMeter()
total_losses = AverageMeter()
# switch to train mode
model.train()
local_rank = torch.distributed.get_rank() if config.distributed else None
if config.distributed:
gpus_num = torch.cuda.device_count()
iters = len(train_loader.dataset) // (
config.batch_size * gpus_num) if config.distributed else len(
train_loader.dataset) // config.batch_size
else:
iters = len(train_loader.dataset) // config.batch_size
prefetcher = ClassificationDataPrefetcher(train_loader)
images, targets = prefetcher.next()
iter_index = 1
while images is not None:
images, targets = images.cuda(), targets.cuda()
tea_outputs, stu_outputs = model(images)
loss = 0
loss_value = {}
for loss_name in criterion.keys():
if 'KD' in loss_name:
temp_loss = criterion[loss_name](stu_outputs, tea_outputs)
else:
temp_loss = criterion[loss_name](stu_outputs, targets)
loss_value[loss_name] = temp_loss
loss += temp_loss
total_losses.update(loss.item(), images.size(0))
loss = loss / config.accumulation_steps
if config.apex:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
if iter_index % config.accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
# measure accuracy and record loss
acc1, acc5 = accuracy(stu_outputs, targets, topk=(1, 5))
top1.update(acc1.item(), images.size(0))
top5.update(acc5.item(), images.size(0))
images, targets = prefetcher.next()
log_info = ''
if iter_index % config.print_interval == 0:
log_info += f'train: epoch {epoch:0>4d}, iter [{iter_index:0>5d}, {iters:0>5d}], lr: {scheduler.get_lr()[0]:.6f}, top1: {acc1.item():.2f}%, top5: {acc5.item():.2f}%, total_loss: {loss.item():.4f} '
for loss_name in criterion.keys():
log_info += f'{loss_name}: {loss_value[loss_name].item():.4f} '
logger.info(log_info) if (config.distributed and local_rank
== 0) or not config.distributed else None
iter_index += 1
scheduler.step()
return top1.avg, top5.avg, total_losses.avg
def compute_voc_ap(recall, precision, use_07_metric=True):
if use_07_metric:
# use voc 2007 11 point metric
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(recall >= t) == 0:
p = 0
else:
# get max precision for recall >= t
p = np.max(precision[recall >= t])
# average 11 recall point precision
ap = ap + p / 11.
else:
# use voc>=2010 metric,average all different recall precision as ap
# recall add first value 0. and last value 1.
mrecall = np.concatenate(([0.], recall, [1.]))
# precision add first value 0. and last value 0.
mprecision = np.concatenate(([0.], precision, [0.]))
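# The original function is truncated here; the standard VOC >= 2010 computation continues
# roughly as follows (reconstruction sketch, not the original lines):
#   for i in range(mprecision.shape[0] - 1, 0, -1):
#       mprecision[i - 1] = np.maximum(mprecision[i - 1], mprecision[i])
#   i = np.where(mrecall[1:] != mrecall[:-1])[0]
#   ap = np.sum((mrecall[i + 1] - mrecall[i]) * mprecision[i + 1])
#   return ap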
# Function for the Fama-MacBeth 2-pass regression used in the Fama-French framework
import numpy as np
from numpy import mat, mean, sqrt, diag
import statsmodels.api as sm
import pandas as pd
# First attempt runs with the downloaded Kenneth French data
returns = pd.read_csv(r'Data/5x5 FF.csv')
riskFactors = pd.read_csv(r'Data/FF3Monthly.csv')
returns = returns.drop(['Unnamed: 0'],axis=1)
returns = returns.sub(riskFactors['RF'], axis=0)
riskFactors = riskFactors.drop(['Unnamed: 0', 'RF'], axis=1)
FMB_result = FMB(returns, riskFactors, nLagsTS = 3)
def FMB(returns, riskFactors, nLagsTS):
# function fmbOut = famaMacBeth(returns,riskFactors)
# Purpose: Estimate linear asset pricing models using the Fama and MacBeth
# (1973) two-pass cross-sectional regression methodology.
#
    # Input: returns = TxN matrix of portfolio excess returns
# riskFactors = TxK matrix of common risk factors
# nLagsTS = Scalar indicating the number of lags to include in HAC
# estimator of variance in first-stage regression
#
# Output: Two structures including results from the two steps
# Use mat for easier linear algebra
factors = mat(riskFactors.values)
    excessReturns = mat(returns.values) # Maybe not .values
# Shape information
t,n = excessReturns.shape
nFactors = factors.shape[1]
# Time series regressions
    # X = sm.add_constant(factors) # Builds X by adding a vector of ones to the factors
    # ts_res = sm.OLS(excessReturns, X).fit() # First pass regression # Old
ts_res = nwRegress(excessReturns, factors, 1, nLagsTS)
beta = ts_res['bv'][1:]
# Cross-section regression
cs_params = pd.DataFrame()
cs_X = sm.add_constant(beta.T)
for iObs in range(t):
cs_params = pd.concat([cs_params, pd.DataFrame(sm.OLS(excessReturns[iObs].T, cs_X).fit().params)], axis=1)
# Risk prices and Fama-MacBeth standard errors and t-stats
RiskPrices = cs_params.mean(axis=1).T
covGamma = (cs_params.T.sub(RiskPrices).T @ cs_params.T.sub(RiskPrices)) / t**2
# seGamma = sqrt((cs_params.T.sub(RiskPrices)**2).sum(axis=0)/t**2)
seGamma = sqrt(diag(covGamma))
tGammaFM = RiskPrices/seGamma
# Adding a Shanken (1992) corrections as per Goyal (2012) eq. (33)
covRiskFactors = ((factors - mean(factors, axis=0)).T @ (factors - mean(factors, axis=0))) / (t - nFactors)
c = RiskPrices[1:] @ np.linalg.inv(covRiskFactors) @ RiskPrices[1:].T # Excluding the constant
covShanken = 1/t * ((1+c) * (t * covGamma.iloc[1:,1:]) + covRiskFactors)
seGammaShanken = sqrt(diag(covShanken)).T
seGammaShanken = np.insert(seGammaShanken, 0,seGamma[0])
tGammaShanken = RiskPrices / seGammaShanken
# Mean and fitted excess returns
meanReturns = pd.DataFrame(mean(excessReturns,0))
fittedValues = (pd.DataFrame(cs_X) @ pd.DataFrame(RiskPrices)).T
# Cross sectional R^2
Ones = pd.DataFrame(np.ones((1, n), dtype=int)).T
errResid = meanReturns-fittedValues
s2 = mean(errResid**2, axis=1)
vary = mean((meanReturns.T - Ones*mean(meanReturns,axis=1))**2)
rSquared = 100 * (1 - s2 / vary)
MAPE = mean(abs(errResid), axis=0)
RMSE = sqrt(mean(errResid**2, axis=0))
fmbOut = dict()
fmbOut['FS_beta'] = ts_res['bv']
fmbOut['FS_tstat'] = ts_res['tbv']
fmbOut['FS_R2'] = ts_res['R2v']
fmbOut['FS_R2adj'] = ts_res['R2vadj']
fmbOut['SS_gamma'] = RiskPrices
fmbOut['SS_seGammaFM'] = seGamma
fmbOut['SS_seGammaShanken'] = seGammaShanken
fmbOut['SS_tstatFM'] = tGammaFM
fmbOut['SS_tstatShanken'] = tGammaShanken
fmbOut['SS_r2'] = rSquared
fmbOut['SS_fit'] = fittedValues
fmbOut['MeanReturns'] = meanReturns
fmbOut['MAPE'] = MAPE
fmbOut['RMSE'] = RMSE
fmbOut['cShanken'] = c
return fmbOut
def nwRegress(y, x, constant, nlag):
# Function regResults = nwRegress(y,x,constant, method,nlag)
    # Purpose: Estimate a linear regression model with Newey-West standard errors.
# a constant is added by default unless otherwise specified
    # Input: y = TxN matrix of dependent variables (N separate regressions)
# x = A TxK matrix of common explanatory variables
# constant = 1 to add constant internally, 0 otherwise
# nlag = scalar indicating the number of lags to include
# Output: A structure including:
# bv = A K x N matrix of parameter estimates
# sbv= A K x N matrix of user-selected standard errors
# tbv= A K x N matrix of t-statistics
# R2v= A N x 1 vector of r-square values
# R2vadj= A N x 1 vector of adjusted r-square values
# Preliminaries
#y = mat(y.values)
#x = mat(x.values)
if constant == 1:
x = sm.add_constant(x)
nObs, nReg = y.shape
nVars = x.shape[1]
OnesObs = pd.DataFrame(np.ones((1, nObs), dtype=int)).T
OnesVars = pd.DataFrame(np.ones((1, nVars), dtype=int)).T
# Coefficient estimates
bv = sm.OLS(y, x).fit().params
# Input for standard errors
Exx = x.T @ x / nObs
errv = y - x @ bv
# Coefficient determination
s2 = mean(np.square(errv), axis = 0)
vary = mat(mean((y - OnesObs @ mean(y, axis = 0))**2, axis = 0))
s2vary = np.divide(s2, vary)
R2v = 100 * (1 - s2vary).T
R2vadj = 100 * (1- s2vary * (nObs-1) / (nObs - nVars)).T
# Newey-West standard errors
# Preallocations
sbv = np.zeros((nVars, nReg))
tbv = np.zeros((nVars, nReg))
# Individual regressions for each dependent variable
for iReg in range(nReg):
ww = 1
err = errv[:,iReg] # (:,iReg)
inner = (x * (err @ OnesVars.T)).T @ (x * (err @ OnesVars.T)) / nObs
for iLag in range(1,nlag):
innadd = (x[1:(nObs-iLag),:] * (err[1:(nObs-iLag)] @ OnesVars.T)).T @ (x[1+iLag:nObs,:] * (err[1+iLag:nObs] @ OnesVars.T)) / nObs
inner = inner + (1 - ww * iLag/(nlag+1)) * (innadd+innadd.T)
varb = sm.OLS(inner, Exx).fit().params @ np.linalg.inv(Exx) / nObs
# Standard errors
sbv[:,iReg] = sqrt(diag(varb))
# t-stats
tbv[:,iReg] = bv[:,iReg] / sbv[:, iReg]
# Structure for results:
nwOut = dict()
nwOut['bv'] = bv
nwOut['tbv'] = tbv
nwOut['R2v'] = R2v
nwOut['R2vadj'] = R2vadj
nwOut['resid'] = errv
return nwOut
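# A minimal usage sketch of the two functions above, reusing the monthly data
# loaded at the top of this file; the key names follow the fmbOut dict built in
# FMB.
example_fmb = FMB(returns, riskFactors, nLagsTS=3)
print('Risk prices (gamma):')
print(example_fmb['SS_gamma'])
print('Fama-MacBeth t-stats:')
print(example_fmb['SS_tstatFM'])
print('Shanken-corrected t-stats:')
print(example_fmb['SS_tstatShanken'])
print('Cross-sectional R^2 (%):')
print(example_fmb['SS_r2'])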
# Test with quarterly data to see whether the results match his
sizeValuePortfolios2 = pd.read_csv(r'Data/sizeValuePortfoliosQ2.csv', sep=';', header=None) # remove columns 0, 3 and 15
sizeValuePortfolios = pd.read_csv(r'Data/sizeValuePortfoliosQ.csv', sep=';', header=None, dtype=np.float64) # remove columns 0, 3 and 15
famaFrenchFactorsQ = pd.read_csv(r'Data/famaFrenchFactorsQ.csv', sep=';', header=None, dtype=np.float64)
logConsGrowthQ = pd.read_csv(r'Data/logConsGrowthQ.csv', sep=';', header=None)
SMB = pd.DataFrame()
HML = pd.DataFrame()
riskFreeRate = pd.DataFrame()
SMB['SMB'] = famaFrenchFactorsQ.iloc[0:,0]
HML['HML'] = famaFrenchFactorsQ.iloc[0:,1]
logConsGrowth = logConsGrowthQ
riskFreeRate['RF'] = famaFrenchFactorsQ.iloc[0:,2]
excessReturns = sizeValuePortfolios.sub(riskFreeRate['RF'], axis=0)
meanExcess = mean(excessReturns)
# coding: utf-8
# public items
__all__ = ["loaddfits", "savefits", "loadnetcdf", "savenetcdf"]
# standard library
from datetime import datetime
from pytz import timezone
from logging import getLogger
from uuid import uuid4
from pathlib import Path
from pkgutil import get_data
# dependent packages
import tomli
import decode as dc
import numpy as np
import xarray as xr
from astropy.io import fits
from scipy.interpolate import interp1d
# module logger
logger = getLogger(__name__)
def loaddfits(
fitsname,
coordtype="azel",
loadtype="temperature",
starttime=None,
endtime=None,
pixelids=None,
scantypes=None,
mode=0,
**kwargs
):
"""Load a decode array from a DFITS file.
Args:
fitsname (str): Name of DFITS file.
coordtype (str): Coordinate type included into a decode array.
'azel': Azimuth / elevation.
'radec': Right ascension / declination.
loadtype (str): Data unit of xarray.
            'temperature': Temperature [K] (DFITS column Tsignal).
            'power': Power [W] (DFITS column Psignal).
'amplitude': Amplitude.
'phase': Phase.
'linphase': Linear phase.
starttime (int, str or numpy.datetime64): Start time of loaded data.
It can be specified by the start index (int),
the time compatible with numpy.datetime64 (str),
or numpy.datetime64 (numpy.datetime64).
Default is None and it means the data will be loaded from the first record.
endtime (int, str or numpy.datetime64): End time of loaded data.
It can be specified by the end index (int),
the time compatible with numpy.datetime64 (str),
or numpy.datetime64 (numpy.datetime64).
Default is None and it means the data will be loaded until the last record.
pixelids (int or list): Under development.
scantypes (list(str)): Scan types, such as 'GRAD', 'SCAN', 'OFF', 'R'.
mode (int): Loading mode.
0: Relative coordinates with cosine projection (RECOMMENDED).
1: Relative coordinates without cosine projection.
2: Absolute coordinates.
kwargs (optional):
findR (bool): Automatically find R positions.
ch (int): Representative channel id used for finding R.
Rth (float): Threshold of R.
            skyth (float): Threshold of sky.
cutnum (int): The number of points of unused data at the edge.
still (bool): When it is true, scantypes of on/off are manually assigned.
period (float): On/off period in second for still data.
shuttle (bool): For shuttle observations.
xmin_off (float): Minimum x of off-point data.
xmax_off (float): Maximum x of off-point data.
xmin_on (float): Minimum x of on-point data.
xmax_on (float): Maximum x of on-point data.
Returns:
decode array (decode.array): Loaded decode array.
"""
if mode not in [0, 1, 2]:
raise KeyError(mode)
logger.info("coordtype starttime endtime mode loadtype")
logger.info("{} {} {} {} {}".format(coordtype, starttime, endtime, mode, loadtype))
# pick up kwargs
# for findR
findR = kwargs.pop("findR", False)
ch = kwargs.pop("ch", 0)
Rth = kwargs.pop("Rth", 280)
skyth = kwargs.pop("skyth", 150)
cutnum = kwargs.pop("cutnum", 1)
# for still
still = kwargs.pop("still", False)
period = kwargs.pop("period", 2)
# for shuttle
shuttle = kwargs.pop("shuttle", False)
xmin_off = kwargs.pop("xmin_off", 0)
xmax_off = kwargs.pop("xmax_off", 0)
xmin_on = kwargs.pop("xmin_on", 0)
xmax_on = kwargs.pop("xmax_on", 0)
# load data
fitsname = str(Path(fitsname).expanduser())
with fits.open(fitsname) as hdulist:
obsinfo = hdulist["OBSINFO"].data
obshdr = hdulist["OBSINFO"].header
antlog = hdulist["ANTENNA"].data
readout = hdulist["READOUT"].data
wealog = hdulist["WEATHER"].data
# obsinfo
masterids = obsinfo["masterids"][0].astype(np.int64)
kidids = obsinfo["kidids"][0].astype(np.int64)
kidfreqs = obsinfo["kidfreqs"][0].astype(np.float64)
kidtypes = obsinfo["kidtypes"][0].astype(np.int64)
# parse start/end time
t_ant = np.array(antlog["time"]).astype(np.datetime64)
t_out = np.array(readout["starttime"]).astype(np.datetime64)
t_wea = np.array(wealog["time"]).astype(np.datetime64)
if starttime is None:
startindex = 0
elif isinstance(starttime, int):
startindex = starttime
elif isinstance(starttime, str):
startindex = np.searchsorted(t_out, np.datetime64(starttime))
elif isinstance(starttime, np.datetime64):
startindex = np.searchsorted(t_out, starttime)
else:
raise ValueError(starttime)
if endtime is None:
endindex = t_out.shape[0]
elif isinstance(endtime, int):
endindex = endtime
elif isinstance(endtime, str):
endindex = np.searchsorted(t_out, np.datetime64(endtime), "right")
elif isinstance(endtime, np.datetime64):
endindex = np.searchsorted(t_out, endtime, "right")
else:
        raise ValueError(endtime)
if t_out[endindex - 1] > t_ant[-1]:
logger.warning("Endtime of readout is adjusted to that of ANTENNA HDU.")
endindex = np.searchsorted(t_out, t_ant[-1], "right")
t_out = t_out[startindex:endindex]
# readout
if loadtype == "temperature":
response = readout["Tsignal"][startindex:endindex].astype(np.float64)
elif loadtype == "power":
response = readout["Psignal"][startindex:endindex].astype(np.float64)
elif loadtype == "amplitude":
response = readout["amplitude"][startindex:endindex].astype(np.float64)
elif loadtype == "phase":
response = readout["phase"][startindex:endindex].astype(np.float64)
elif loadtype == "linphase":
response = readout["line_phase"][startindex:endindex].astype(np.float64)
else:
raise KeyError(loadtype)
# antenna
if coordtype == "azel":
x = antlog["az"].copy()
y = antlog["el"].copy()
xref = np.median(antlog["az_center"])
yref = np.median(antlog["el_center"])
if mode in [0, 1]:
x -= antlog["az_center"]
y -= antlog["el_center"]
if mode == 0:
x *= np.cos(np.deg2rad(antlog["el"]))
elif coordtype == "radec":
x = antlog["ra"].copy()
y = antlog["dec"].copy()
xref = obshdr["RA"]
yref = obshdr["DEC"]
if mode in [0, 1]:
x -= xref
y -= yref
if mode == 0:
x *= np.cos(np.deg2rad(antlog["dec"]))
else:
raise KeyError(coordtype)
scantype = antlog["scantype"]
# weatherlog
temp = wealog["temperature"]
pressure = wealog["pressure"]
vpressure = wealog["vapor-pressure"]
windspd = wealog["windspd"]
winddir = wealog["winddir"]
# interpolation
dt_out = (t_out - t_out[0]) / np.timedelta64(1, "s")
dt_ant = (t_ant - t_out[0]) / np.timedelta64(1, "s")
dt_wea = (t_wea - t_out[0]) / np.timedelta64(1, "s")
x_i = np.interp(dt_out, dt_ant, x)
y_i = np.interp(dt_out, dt_ant, y)
temp_i = np.interp(dt_out, dt_wea, temp)
pressure_i = np.interp(dt_out, dt_wea, pressure)
vpressure_i = np.interp(dt_out, dt_wea, vpressure)
windspd_i = np.interp(dt_out, dt_wea, windspd)
winddir_i = np.interp(dt_out, dt_wea, winddir)
scandict = {t: n for n, t in enumerate(np.unique(scantype))}
scantype_v = np.zeros(scantype.shape[0], dtype=int)
for k, v in scandict.items():
scantype_v[scantype == k] = v
scantype_vi = interp1d(
dt_ant,
scantype_v,
kind="nearest",
bounds_error=False,
fill_value=(scantype_v[0], scantype_v[-1]),
)(dt_out)
scantype_i = np.full_like(scantype_vi, "GRAD", dtype="<U8")
for k, v in scandict.items():
scantype_i[scantype_vi == v] = k
# for still data
if still:
for n in range(int(dt_out[-1]) // period + 1):
offmask = (period * 2 * n <= dt_out) & (dt_out < period * (2 * n + 1))
onmask = (period * (2 * n + 1) <= dt_out) & (dt_out < period * (2 * n + 2))
scantype_i[offmask] = "OFF"
scantype_i[onmask] = "SCAN"
if shuttle:
offmask = (xmin_off < x_i) & (x_i < xmax_off)
onmask = (xmin_on < x_i) & (x_i < xmax_on)
scantype_i[offmask] = "OFF"
scantype_i[onmask] = "SCAN"
scantype_i[(~offmask) & (~onmask)] = "JUNK"
if findR:
Rindex = np.where(response[:, ch] >= Rth)
scantype_i[Rindex] = "R"
movemask = np.hstack(
[[False] * cutnum, scantype_i[cutnum:] != scantype_i[:-cutnum]]
) | np.hstack(
[scantype_i[:-cutnum] != scantype_i[cutnum:], [False] * cutnum]
) & (
scantype_i == "R"
)
scantype_i[movemask] = "JUNK"
scantype_i[(response[:, ch] > skyth) & (scantype_i != "R")] = "JUNK"
scantype_i[(response[:, ch] <= skyth) & (scantype_i == "R")] = "JUNK"
skyindex = np.where(response[:, ch] <= skyth)
scantype_i_temp = scantype_i.copy()
scantype_i_temp[skyindex] = "SKY"
movemask = np.hstack(
[[False] * cutnum, scantype_i_temp[cutnum:] != scantype_i_temp[:-cutnum]]
) | np.hstack(
[scantype_i_temp[:-cutnum] != scantype_i_temp[cutnum:], [False] * cutnum]
) & (
scantype_i_temp == "SKY"
)
scantype_i[movemask] = "JUNK"
# scanid
scanid_i = np.cumsum(np.hstack([False, scantype_i[1:] != scantype_i[:-1]]))
# coordinates
tcoords = {
"x": x_i,
"y": y_i,
"time": t_out,
"temp": temp_i,
"pressure": pressure_i,
"vapor-pressure": vpressure_i,
"windspd": windspd_i,
"winddir": winddir_i,
"scantype": scantype_i,
"scanid": scanid_i,
}
chcoords = {
"masterid": masterids,
"kidid": kidids,
"kidfq": kidfreqs,
"kidtp": kidtypes,
}
scalarcoords = {
"coordsys": coordtype.upper(),
"datatype": loadtype,
"xref": xref,
"yref": yref,
}
# make array
array = dc.array(
response, tcoords=tcoords, chcoords=chcoords, scalarcoords=scalarcoords
)
if scantypes is not None:
mask = np.full(array.shape[0], False)
for scantype in scantypes:
mask |= array.scantype == scantype
array = array[mask]
return array
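def _example_loaddfits():
    """A minimal usage sketch of loaddfits, assuming a hypothetical DFITS file
    name and illustrative keyword values (the thresholds mirror the docstring
    defaults and are not taken from a real observation)."""
    return loaddfits(
        "~/data/example_dfits.fits.gz",  # hypothetical file name
        coordtype="radec",
        loadtype="temperature",
        scantypes=["SCAN", "OFF", "R"],
        mode=0,
        findR=True,
        ch=0,
        Rth=280,
        skyth=150,
        cutnum=1,
    )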
def savefits(cube, fitsname, **kwargs):
"""Save a cube to a 3D-cube FITS file.
Args:
cube (xarray.DataArray): Cube to be saved.
fitsname (str): Name of output FITS file.
kwargs (optional): Other arguments common with astropy.io.fits.writeto().
"""
# pick up kwargs
dropdeg = kwargs.pop("dropdeg", False)
ndim = len(cube.dims)
# load yaml
fitsinfo = get_data("decode", "data/fitsinfo.toml")
hdrdata = tomli.loads(fitsinfo.decode("utf-8"))
# default header
if ndim == 2:
header = fits.Header(hdrdata["dcube_2d"])
data = cube.values.T
elif ndim == 3:
if dropdeg:
header = fits.Header(hdrdata["dcube_2d"])
data = cube.values[:, :, 0].T
else:
header = fits.Header(hdrdata["dcube_3d"])
kidfq = cube.kidfq.values
freqrange = ~np.isnan(kidfq)
            orderedfq = np.argsort(kidfq[freqrange])
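            # A minimal completion sketch (assumption, not the package's
            # verbatim code): keep finite-frequency channels, order them by kid
            # frequency and transpose into FITS axis order.
            data = cube.values[:, :, freqrange][:, :, orderedfq].T
    else:
        raise ValueError("cube must be 2- or 3-dimensional")

    # write to file; remaining kwargs (e.g. overwrite=True) are passed through
    fits.writeto(fitsname, data, header, **kwargs)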
# Import standard modules
import sqlite3
from datetime import datetime, timedelta
# Import installed modules
import spiceypy
import numpy as np
import pandas as pd
#%%
# Load the SPICE kernel meta file
spiceypy.furnsh('kernel_meta.txt')
#%%
# Get the G*M value of the Sun
_, GM_SUN_PRE = spiceypy.bodvcd(bodyid=10, item='GM', maxn=1)
GM_SUN = GM_SUN_PRE[0]
#%%
# Connect to the comet database
CON = sqlite3.connect('../_databases/_comets/mpc_comets.db')
# Extract orbit data of the comet C/2019 Y4 (ATLAS)
ATLAS_ORB_EL = pd.read_sql('SELECT NAME, PERIHELION_AU, ' \
'ECCENTRICITY, INCLINATION_DEG, ' \
'LONG_OF_ASC_NODE_DEG, ARG_OF_PERIH_DEG, ' \
'MEAN_ANOMALY_DEG, EPOCH_ET ' \
'FROM comets_main ' \
'WHERE NAME="C/2019 Y4 (ATLAS)"', CON)
# Convert the perihelion, that is given in AU, to km
ATLAS_ORB_EL.loc[:, 'PERIHELION_KM'] = \
ATLAS_ORB_EL['PERIHELION_AU'].apply(lambda x: \
spiceypy.convrt(x, inunit='AU', \
outunit='km'))
# Convert all angular parameters to radians, since the entries in the database
# are stored in degrees. The for-loop iterates through all column names that
# contain the word "DEG"
for angle_col_name in [col for col in ATLAS_ORB_EL.columns if 'DEG' in col]:
ATLAS_ORB_EL.loc[:, angle_col_name.replace('DEG', 'RAD')] = \
np.radians(ATLAS_ORB_EL[angle_col_name])
# Add the G*M value of the Sun
ATLAS_ORB_EL.loc[:, 'SUN_GM'] = GM_SUN
#%%
# Extract all orbital elements / information in a SPICE compatible order (see
# function conics)
ATLAS_SPICE_ORB_EL = ATLAS_ORB_EL[['PERIHELION_KM', 'ECCENTRICITY', \
'INCLINATION_RAD', 'LONG_OF_ASC_NODE_RAD', \
'ARG_OF_PERIH_RAD', 'MEAN_ANOMALY_DEG', \
'EPOCH_ET', 'SUN_GM']].iloc[0].values
#%%
# Set an initial time and end time for the computation procedure
INI_DATETIME = datetime(year=2020, month=5, day=20)
END_DATETIME = datetime(year=2020, month=6, day=10)
# Create an array that covers the initial and end time in 1 hour steps
TIME_ARRAY = np.arange(INI_DATETIME, END_DATETIME, \
timedelta(hours=1)).astype(datetime)
#%%
# Set an empty array that will store the distances between the Sun
# and ATLAS
atlas_vecs = []
# Set an empty array that will store the distances between the Sun
# and the Solar Orbiter
solar_orb_vecs = []
# Iterate through the time array (comet ATLAS)
for atlas_time_step in TIME_ARRAY:
# Compute the ET
atlas_et = spiceypy.datetime2et(atlas_time_step)
# Compute the ET corresponding state vector of the comet ATLAS
atlas_state_vec = spiceypy.conics(ATLAS_SPICE_ORB_EL, atlas_et)
# Store the position vector
atlas_vecs.append(atlas_state_vec[:3])
# Iterate through the time array (Solar Orbiter)
for so_time_step in TIME_ARRAY:
# Compute the ET
so_et = spiceypy.datetime2et(so_time_step)
# Compute the state vector of the Solar Orbiter (NAIF ID: -144)
solar_orb_state_vec, _ = spiceypy.spkgeo(targ=-144, et=so_et, \
ref='ECLIPJ2000', obs=10)
# Store the position vector
solar_orb_vecs.append(solar_orb_state_vec[:3])
# Convert the lists that contain the vectors to numpy lists
atlas_vecs = np.array(atlas_vecs)
solar_orb_vecs = np.array(solar_orb_vecs)
#%%
# Minimum distance ATLAS - Sun
MIN_DIST_ATLAS_SUN = np.min(np.linalg.norm(atlas_vecs, axis=1))
print('Minimum distance ATLAS - Sun in AU: ' \
f'{spiceypy.convrt(MIN_DIST_ATLAS_SUN, "km", "AU")}')
# Minimum distance Solar Orbiter - Sun
MIN_DIST_SOLAR_ORB_SUN = np.min(np.linalg.norm(solar_orb_vecs, axis=1))
print('Minimum distance Solar Orbiter - Sun in AU: ' \
f'{spiceypy.convrt(MIN_DIST_SOLAR_ORB_SUN, "km", "AU")}')
print('\n')
#%%
# What is the closest approach between both trajectories?
# Compute a matrix that contains all possible distances, using the scipy
# function cdist
import scipy.spatial
MIN_DIST_MATRIX = scipy.spatial.distance.cdist(atlas_vecs, solar_orb_vecs)
# Print the minimum distance
print('Minimum distance between ATLAS and Solar Orbiter in km: ' \
f'{np.min(np.round(MIN_DIST_MATRIX))}')
print('\n')
#%%
# The timing needs to be correct too! The comet produces ions and creates
# its tail within the spacecraft's trajectory. Thus, the comet needs to pass
# by the minimum distance first
# Determine the distance matrix indices of the closest approach
indices_min = np.where(MIN_DIST_MATRIX == np.min(MIN_DIST_MATRIX))
indices_min = [k.item() for k in indices_min]
# Let's print the indices for
# ATLAS
print(f'Atlas Index of close approach: {indices_min[0]}')
# Solar Orbiter
print(f'Solar Orbiter Index of close approach: {indices_min[1]}')
print('\n')
#%%
# Corresponding times (only a few days apart. Thus, an ion tail could be
# detectable)
print(f'ATLAS closest approach date-time: {TIME_ARRAY[indices_min[0]]}')
print('Solar Orbiter closest approach date-time: ' \
f'{TIME_ARRAY[indices_min[1]]}')
print('\n')
#%%
# ... but is the ion tail "aiming" towards the trajectory of the spacecraft?
# (at least within a few degrees?)
# Compute the angular distance between the trajectories' closest approach
# Set the closest approach vectors, based on the obtained indices for ATLAS and
# the Solar Orbiter, respectively
VEC_ATLAS_AP = atlas_vecs[indices_min[0]]
VEC_SOLAR_ORB_AP = solar_orb_vecs[indices_min[1]]
# Determine the norm of both closest approach vectors
ATLAS_NORM_AP = spiceypy.vnorm(VEC_ATLAS_AP)
SOLORB_NORM_AP = spiceypy.vnorm(VEC_SOLAR_ORB_AP)
# Compute the dot product
DOT_PRODUCT_AP = np.dot(VEC_ATLAS_AP, VEC_SOLAR_ORB_AP)
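# A minimal completion sketch, assuming the angular separation follows from
# cos(theta) = (a . b) / (|a| |b|) with the vectors and norms computed above.
ANGULAR_DIST_AP = np.degrees(np.arccos(DOT_PRODUCT_AP
                                       / (ATLAS_NORM_AP * SOLORB_NORM_AP)))
print('Angular distance between the closest-approach vectors in degrees: ' \
      f'{np.round(ANGULAR_DIST_AP, 2)}')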
# -*- coding: utf-8 -*-
# Copyright (C) 2012 VT SuperDARN Lab
# Full license can be found in LICENSE.txt
"""Earth coordinate conversion routines
Functions
---------
geodToGeoc : converts from geodetic to geocentric (and vice-versa)
geodToGeocAzEl : converts azimuth and elevation from geodetic to geocentric
(and vice-versa)
gspToGcar : converts global spherical coordinates to global cartesian
coordinates (and vice-versa)
gcarToLcar : converts from global cartesian coordinates to local cartesian
coordinates (and vice-versa)
lspToLcar : converts from local spherical coordinates to local cartesian
coordinates (and vice-versa)
calcDistPnt : calculates the coordinates|distance,elevation,azimuth of a point
given a point of origin and distance, elevation, azimuth|distant
point coordinates
greatCircleMove : Calculates the coordinates of an end point along a great
circle path given the original coordinates, distance, azimuth,
and altitude.
greatCircleAzm : Calculates the azimuth from the coordinates of a start point
to and end point along a great circle path.
greatCircleDist : Calculates the distance in radians along a great circle path
between two points.
References
----------
Based on <NAME>'s geopack
Based on <NAME> radar.pro
Updates based on <NAME> cnvtcoord_vhm.c
Copied from DaViTPy
"""
import logging
import numpy as np
def geodToGeoc(lat, lon, inverse=False):
"""Converts position from geodetic to geocentric or vice-versa.
Based on the IAU 1964 oblate spheroid model of the Earth.
Parameters
----------
lat : float
latitude [degree]
lon : float
longitude [degree]
inverse : Optional[bool]
inverse conversion (geocentric to geodetic). Default is false.
Returns
-------
lat_out : float
latitude [degree] (geocentric/detic if inverse=False/True)
lon_out : float
longitude [degree] (geocentric/detic if inverse=False/True)
rade : float
Earth radius [km] (geocentric/detic if inverse=False/True)
"""
a = 6378.16
f = 1.0 / 298.25
b = a * (1.0 - f)
e2 = (a**2 / b**2) - 1.0
if not inverse:
# geodetic into geocentric
lat_out = np.degrees(np.arctan(b**2 / a**2 * np.tan(np.radians(lat))))
lon_out = lon
else:
# geocentric into geodetic
lat_out = np.degrees(np.arctan(a**2 / b**2 * np.tan(np.radians(lat))))
lon_out = lon
rade = a / np.sqrt( 1. + e2 * np.sin(np.radians(lat_out))**2)
return lat_out, lon_out, rade
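def _example_geodToGeoc_roundtrip():
    """A minimal round-trip sketch with arbitrary example coordinates: convert
    geodetic to geocentric and back, which should approximately recover the
    starting latitude."""
    gc_lat, gc_lon, re_gc = geodToGeoc(45.0, -75.0)
    gd_lat, gd_lon, re_gd = geodToGeoc(gc_lat, gc_lon, inverse=True)
    return gc_lat, gd_lat, re_gc, re_gd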
def geodToGeocAzEl(lat, lon, az, el, inverse=False):
"""Converts pointing azimuth and elevation measured with respect to the
local horizon to azimuth and elevation with respect to the horizon defined
by the plane perpendicular to the Earth-centered radial vector drawn
through a user defined point.
Parameters
----------
lat : float
latitude [degree]
lon : float
longitude [degree]
az : float
azimuth [degree, N]
el : float
elevation [degree]
inverse : Optional[bool]
inverse conversion
Returns
-------
lat : float
latitude [degree]
lon : float
longitude [degree]
Re : float
Earth radius [km]
az : float
azimuth [degree, N]
el : float
elevation [degree]
"""
taz = np.radians(az)
tel = np.radians(el)
# In this transformation x is east, y is north and z is up
if not inverse:
# Calculate deviation from vertical (in radians)
(geocLat, geocLon, Re) = geodToGeoc(lat, lon)
devH = np.radians(lat - geocLat)
# Calculate cartesian coordinated in local system
kxGD = np.cos(tel) * np.sin(taz)
kyGD = np.cos(tel) * np.cos(taz)
kzGD = np.sin(tel)
# Now rotate system about the x axis to align local vertical vector
# with Earth radial vector
kxGC = kxGD
kyGC = kyGD * np.cos(devH) + kzGD * np.sin(devH)
kzGC = -kyGD * np.sin(devH) + kzGD * np.cos(devH)
# Finally calculate the new azimuth and elevation in the geocentric
# frame
azOut = np.degrees(np.arctan2(kxGC, kyGC))
elOut = np.degrees(np.arctan(kzGC / np.sqrt(kxGC**2 + kyGC**2)))
latOut = geocLat
lonOut = geocLon
else:
# Calculate deviation from vertical (in radians)
(geodLat, geodLon, Re) = geodToGeoc(lat, lon, inverse=True)
devH = np.radians(geodLat - lat)
# Calculate cartesian coordinated in geocentric system
kxGC = np.cos(tel) * np.sin(taz)
kyGC = np.cos(tel) * np.cos(taz)
kzGC = np.sin(tel)
# Now rotate system about the x axis to align local vertical vector
# with Earth radial vector
kxGD = kxGC
kyGD = kyGC * np.cos(-devH) + kzGC * np.sin(-devH)
kzGD = -kyGC * np.sin(-devH) + kzGC * np.cos(-devH)
# Finally calculate the new azimuth and elevation in the geocentric
# frame
azOut = np.degrees(np.arctan2(kxGD, kyGD))
elOut = np.degrees(np.arctan(kzGD / np.sqrt(kxGD**2 + kyGD**2)))
latOut = geodLat
lonOut = geodLon
return latOut, lonOut, Re, azOut, elOut
def gspToGcar(xin, yin, zin, inverse=False):
"""Converts a position from global spherical (geocentric) to global
cartesian (and vice-versa).
Parameters
----------
xin : float
latitude [degree] or global cartesian X [km]
yin : float
longitude [degree] or global cartesian Y [km]
zin : float
distance from center of the Earth [km] or global cartesian Z [km]
inverse : Optional[bool]
inverse conversion
Returns
-------
xout : float
global cartesian X [km] (inverse=False) or latitude [degree]
yout : float
global cartesian Y [km] (inverse=False) or longitude [degree]
zout : float
global cartesian Z [km] (inverse=False) or distance from the center of
the Earth [km]
Notes
-------
The global cartesian coordinate system is defined as:
- origin: center of the Earth
- x-axis in the equatorial plane and through the prime meridian.
- z-axis in the direction of the rotational axis and through the North
pole
The meaning of the input (x,y,z) depends on the direction of the conversion
(to global cartesian or to global spherical).
"""
if not inverse:
# Global spherical to global cartesian
        xout = zin * np.cos(np.radians(xin)) * np.cos(np.radians(yin))
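        # A minimal completion sketch, assuming the standard spherical to
        # cartesian relations described in the docstring (x east through the
        # prime meridian, z through the North pole).
        yout = zin * np.cos(np.radians(xin)) * np.sin(np.radians(yin))
        zout = zin * np.sin(np.radians(xin))
    else:
        # Global cartesian to global spherical
        zout = np.sqrt(xin**2 + yin**2 + zin**2)
        xout = np.degrees(np.arcsin(zin / zout))
        yout = np.degrees(np.arctan2(yin, xin))
    return xout, yout, zout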
import datetime as dt
from io import StringIO
import logging
import numpy as np
import os
import pytest
import warnings
import aacgmv2
class TestConvertArray:
def setup(self):
self.out = None
self.ref = None
self.rtol = 1.0e-4
def teardown(self):
del self.out, self.ref, self.rtol
def evaluate_output(self, ind=None):
""" Function used to evaluate convert_latlon_arr output"""
if self.out is not None:
if ind is not None:
self.ref = [[rr[ind]] for rr in self.ref]
np.testing.assert_equal(len(self.out), len(self.ref))
for i, oo in enumerate(self.out):
if not isinstance(oo, np.ndarray):
raise TypeError("output value is not a numpy array")
np.testing.assert_equal(len(oo), len(self.ref[i]))
np.testing.assert_allclose(oo, self.ref[i], rtol=self.rtol)
class TestConvertLatLon:
def setup(self):
"""Runs before every method to create a clean testing setup"""
self.dtime = dt.datetime(2015, 1, 1, 0, 0, 0)
self.ddate = dt.date(2015, 1, 1)
self.in_args = [60, 0]
self.out = None
self.rtol = 1.0e-4
def teardown(self):
"""Runs after every method to clean up previous testing"""
del self.out, self.in_args, self.rtol, self.dtime, self.ddate
@pytest.mark.parametrize('alt,method_code,ref',
[(300, 'TRACE', [58.2268, 81.1613, 1.0457]),
(3000.0, "G2A|BADIDEA", [64.3578, 83.2895,
1.4694]),
(7000.0, "G2A|TRACE|BADIDEA",
[69.3187, 85.0845, 2.0973])])
def test_convert_latlon(self, alt, method_code, ref):
"""Test single value latlon conversion"""
self.in_args.extend([alt, self.dtime, method_code])
self.out = aacgmv2.convert_latlon(*self.in_args)
np.testing.assert_allclose(self.out, ref, rtol=self.rtol)
@pytest.mark.parametrize('lat,ref',
[(90.01, [83.927161, 170.1471396, 1.04481923]),
(-90.01, [-74.9814852, 17.990332, 1.044819236])])
def test_convert_latlon_high_lat(self, lat, ref):
"""Test single latlon conversion with latitude just out of bounds"""
self.in_args[0] = lat
self.in_args.extend([300, self.dtime, 'G2A'])
self.out = aacgmv2.convert_latlon(*self.in_args)
np.testing.assert_allclose(self.out, ref, rtol=self.rtol)
def test_convert_latlon_datetime_date(self):
"""Test single latlon conversion with date and datetime input"""
self.in_args.extend([300, self.ddate, 'TRACE'])
self.out = aacgmv2.convert_latlon(*self.in_args)
np.testing.assert_allclose(self.out, [58.2268, 81.1613, 1.0457],
rtol=self.rtol)
def test_convert_latlon_location_failure(self):
"""Test single value latlon conversion with a bad location"""
self.out = aacgmv2.convert_latlon(0, 0, 0, self.dtime, self.in_args[-1])
assert np.all(np.isnan(np.array(self.out)))
def test_convert_latlon_maxalt_failure(self):
"""test convert_latlon failure for an altitude too high for coeffs"""
self.in_args.extend([2001, self.dtime, ""])
self.out = aacgmv2.convert_latlon(*self.in_args)
assert np.all(np.isnan(np.array(self.out)))
@pytest.mark.parametrize('in_rep,in_irep,msg',
[(None, 3, "must be a datetime object"),
(91, 0, "unrealistic latitude"),
(-91, 0, "unrealistic latitude"),
(None, 4, "unknown method code")])
def test_convert_latlon_failure(self, in_rep, in_irep, msg):
self.in_args.extend([300, self.dtime, "G2A"])
self.in_args[in_irep] = in_rep
with pytest.raises(ValueError, match=msg):
aacgmv2.convert_latlon(*self.in_args)
class TestConvertLatLonArr(TestConvertArray):
def setup(self):
"""Runs before every method to create a clean testing setup"""
self.dtime = dt.datetime(2015, 1, 1, 0, 0, 0)
self.ddate = dt.date(2015, 1, 1)
self.lat_in = [60.0, 61.0]
self.lon_in = [0.0, 0.0]
self.alt_in = [300.0, 300.0]
self.method = 'TRACE'
self.out = None
self.ref = [[58.2268, 59.3184], [81.1613, 81.6080], [1.0457, 1.0456]]
self.rtol = 1.0e-4
def teardown(self):
"""Runs after every method to clean up previous testing"""
del self.lat_in, self.lon_in, self.alt_in, self.dtime, self.ddate
del self.method, self.out, self.ref, self.rtol
def test_convert_latlon_arr_single_val(self):
"""Test array latlon conversion for a single value"""
self.out = aacgmv2.convert_latlon_arr(self.lat_in[0], self.lon_in[0],
self.alt_in[0], self.dtime,
self.method)
self.evaluate_output(ind=0)
def test_convert_latlon_arr_arr_single(self):
"""Test array latlon conversion for array input of shape (1,)"""
self.out = aacgmv2.convert_latlon_arr(np.array([self.lat_in[0]]),
np.array([self.lon_in[0]]),
np.array([self.alt_in[0]]),
self.dtime, self.method)
self.evaluate_output(ind=0)
def test_convert_latlon_arr_list_single(self):
"""Test array latlon conversion for list input of single values"""
self.out = aacgmv2.convert_latlon_arr([self.lat_in[0]],
[self.lon_in[0]],
[self.alt_in[0]], self.dtime,
self.method)
self.evaluate_output(ind=0)
def test_convert_latlon_arr_list(self):
"""Test array latlon conversion for list input"""
self.out = aacgmv2.convert_latlon_arr(self.lat_in, self.lon_in,
self.alt_in, self.dtime,
self.method)
self.evaluate_output()
def test_convert_latlon_arr_arr(self):
"""Test array latlon conversion for array input"""
self.out = aacgmv2.convert_latlon_arr(np.array(self.lat_in),
np.array(self.lon_in),
np.array(self.alt_in),
self.dtime, self.method)
self.evaluate_output()
def test_convert_latlon_arr_list_mix(self):
"""Test array latlon conversion for mixed types with list"""
self.out = aacgmv2.convert_latlon_arr(self.lat_in, self.lon_in[0],
self.alt_in[0], self.dtime,
self.method)
self.evaluate_output()
def test_convert_latlon_arr_arr_mix(self):
"""Test array latlon conversion for mixed type with an array"""
self.out = aacgmv2.convert_latlon_arr(np.array(self.lat_in),
self.lon_in[0], self.alt_in[0],
self.dtime, self.method)
self.evaluate_output()
def test_convert_latlon_arr_arr_mult_and_single_element(self):
"""Test latlon conversion for arrays with multiple and single vals"""
self.out = aacgmv2.convert_latlon_arr(np.array(self.lat_in),
np.array([self.lon_in[0]]),
np.array(self.alt_in),
self.dtime, self.method)
self.evaluate_output()
@pytest.mark.parametrize('method_code,alt,local_ref',
[("BADIDEA", 3000.0,
[[64.3580], [83.2895], [1.4694]]),
("BADIDEA|TRACE", 7000.0,
[[69.3187], [85.0845], [2.0973]])])
def test_convert_latlon_arr_badidea(self, method_code, alt, local_ref):
"""Test array latlon conversion for BADIDEA"""
self.out = aacgmv2.convert_latlon_arr(self.lat_in[0], self.lon_in[0],
[alt], self.dtime, method_code)
self.ref = local_ref
self.evaluate_output()
def test_convert_latlon_arr_location_failure(self):
"""Test array latlon conversion with a bad location"""
with warnings.catch_warnings():
            # Causes all warnings to be suppressed
warnings.simplefilter("ignore")
# Trigger a warning
self.out = aacgmv2.convert_latlon_arr([0], [0], [0], self.dtime, "")
# Test the output
np.testing.assert_equal(len(self.out), len(self.ref))
assert np.any(~np.isfinite(np.array(self.out)))
def test_convert_latlon_arr_datetime_date(self):
"""Test array latlon conversion with date and datetime input"""
self.out = aacgmv2.convert_latlon_arr(self.lat_in, self.lon_in,
self.alt_in, self.ddate,
self.method)
self.evaluate_output()
def test_convert_latlon_arr_clip(self):
"""Test array latlon conversion with latitude clipping"""
self.lat_in = [90.01, -90.01]
self.ref = [[83.92352053, -74.98110552], [170.1381271, 17.98164313],
[1.04481924, 1.04481924]]
self.out = aacgmv2.convert_latlon_arr(self.lat_in, self.lon_in,
self.alt_in, self.ddate,
self.method)
self.evaluate_output()
def test_convert_latlon_arr_maxalt_failure(self):
"""test convert_latlon_arr failure for altitudes too high for coeffs"""
self.method = ""
self.out = aacgmv2.convert_latlon_arr(self.lat_in[0], self.lon_in[0],
[2001], self.dtime, self.method)
assert np.all(np.isnan(np.array(self.out)))
@pytest.mark.parametrize('in_rep,in_irep,msg',
[(None, 3, "must be a datetime object"),
([np.full(shape=(3, 2), fill_value=50.0), 0],
[0, 1], "unable to process multi-dimensional"),
([50, 60, 70], 0, "arrays are mismatched"),
([[91, 60, -91], 0, 300], [0, 1, 2],
"unrealistic latitude"),
(None, 4, "unknown method code")])
def test_convert_latlon_arr_failure(self, in_rep, in_irep, msg):
in_args = np.array([self.lat_in, self.lon_in, self.alt_in, self.dtime,
"G2A"], dtype=object)
in_args[in_irep] = in_rep
with pytest.raises(ValueError, match=msg):
aacgmv2.convert_latlon_arr(*in_args)
class TestGetAACGMCoord:
def setup(self):
"""Runs before every method to create a clean testing setup"""
self.dtime = dt.datetime(2015, 1, 1, 0, 0, 0)
self.ddate = dt.date(2015, 1, 1)
self.in_args = [60, 0]
self.out = None
self.rtol = 1.0e-4
def teardown(self):
"""Runs after every method to clean up previous testing"""
del self.out, self.in_args, self.rtol, self.dtime, self.ddate
@pytest.mark.parametrize('alt,method_code,ref',
[(300, 'TRACE', [58.2268, 81.1613, 0.1888]),
(3000.0, "G2A|BADIDEA", [64.3578, 83.2895,
0.3307]),
(7000.0, "G2A|TRACE|BADIDEA",
[69.3187, 85.0845, 0.4503])])
def test_get_aacgm_coord(self, alt, method_code, ref):
"""Test single value AACGMV2 calculation, defaults to TRACE"""
self.in_args.extend([alt, self.dtime, method_code])
self.out = aacgmv2.get_aacgm_coord(*self.in_args)
np.testing.assert_allclose(self.out, ref, rtol=self.rtol)
def test_get_aacgm_coord_datetime_date(self):
"""Test single AACGMV2 calculation with date and datetime input"""
self.in_args.extend([300.0, self.ddate, 'TRACE'])
self.out = aacgmv2.get_aacgm_coord(*self.in_args)
np.testing.assert_allclose(self.out, [58.2268, 81.1613, 0.1888],
rtol=self.rtol)
def test_get_aacgm_coord_location_failure(self):
"""Test single value AACGMV2 calculation with a bad location"""
self.in_args.extend([0.0, self.dtime, 'TRACE'])
self.in_args[0] = 0.0
self.out = aacgmv2.get_aacgm_coord(*self.in_args)
        assert np.all(np.isnan(np.array(self.out)))
def test_get_aacgm_coord_maxalt_failure(self):
"""test get_aacgm_coord failure for an altitude too high for coeffs"""
self.in_args.extend([2001, self.dtime, ""])
self.out = aacgmv2.get_aacgm_coord(*self.in_args)
assert np.all(np.isnan(np.array(self.out)))
@pytest.mark.parametrize('in_index,value',
[(3, None), (0, 91.0), (0, -91.0)])
def test_get_aacgm_coord_raise_value_error(self, in_index, value):
"""Test different ways to raise a ValueError"""
self.in_args.extend([300.0, self.dtime])
self.in_args[in_index] = value
with pytest.raises(ValueError):
self.out = aacgmv2.get_aacgm_coord(*self.in_args)
class TestGetAACGMCoordArr(TestConvertArray):
def setup(self):
"""Runs before every method to create a clean testing setup"""
self.dtime = dt.datetime(2015, 1, 1, 0, 0, 0)
self.ddate = dt.date(2015, 1, 1)
self.lat_in = [60.0, 61.0]
self.lon_in = [0.0, 0.0]
self.alt_in = [300.0, 300.0]
self.method = 'TRACE'
self.out = None
self.ref = [[58.22676, 59.31847], [81.16135, 81.60797],
[0.18880, 0.21857]]
self.rtol = 1.0e-4
def teardown(self):
"""Runs after every method to clean up previous testing"""
del self.out, self.ref, self.lat_in, self.dtime, self.ddate
del self.lon_in, self.alt_in, self.method, self.rtol
def test_get_aacgm_coord_arr_single_val(self):
"""Test array AACGMV2 calculation for a single value"""
self.out = aacgmv2.get_aacgm_coord_arr(self.lat_in[0], self.lon_in[0],
self.alt_in[0], self.dtime,
self.method)
self.evaluate_output(ind=0)
def test_get_aacgm_coord_arr_list_single(self):
"""Test array AACGMV2 calculation for list input of single values"""
self.out = aacgmv2.get_aacgm_coord_arr([self.lat_in[0]],
[self.lon_in[0]],
[self.alt_in[0]], self.dtime,
self.method)
self.evaluate_output(ind=0)
def test_get_aacgm_coord_arr_arr_single(self):
"""Test array AACGMV2 calculation for array with a single value"""
self.out = aacgmv2.get_aacgm_coord_arr(np.array([self.lat_in[0]]),
np.array([self.lon_in[0]]),
np.array([self.alt_in[0]]),
self.dtime, self.method)
self.evaluate_output(ind=0)
def test_get_aacgm_coord_arr_list(self):
"""Test array AACGMV2 calculation for list input"""
self.out = aacgmv2.get_aacgm_coord_arr(self.lat_in, self.lon_in,
self.alt_in, self.dtime,
self.method)
self.evaluate_output()
def test_get_aacgm_coord_arr_arr(self):
"""Test array AACGMV2 calculation for an array"""
self.out = aacgmv2.get_aacgm_coord_arr(np.array(self.lat_in),
np.array(self.lon_in),
np.array(self.alt_in),
self.dtime, self.method)
self.evaluate_output()
def test_get_aacgm_coord_arr_list_mix(self):
"""Test array AACGMV2 calculation for a list and floats"""
self.out = aacgmv2.get_aacgm_coord_arr(self.lat_in, self.lon_in[0],
self.alt_in[0], self.dtime,
self.method)
self.evaluate_output()
def test_get_aacgm_coord_arr_arr_mix(self):
"""Test array AACGMV2 calculation for an array and floats"""
self.out = aacgmv2.get_aacgm_coord_arr(np.array(self.lat_in),
self.lon_in[0], self.alt_in[0],
self.dtime, self.method)
self.evaluate_output()
def test_get_aacgm_coord_arr_badidea(self):
"""Test array AACGMV2 calculation for BADIDEA"""
self.method = "|".join([self.method, "BADIDEA"])
self.out = aacgmv2.get_aacgm_coord_arr(self.lat_in[0], self.lon_in[0],
[3000.0], self.dtime,
self.method)
self.ref = [[64.3481], [83.2885], [0.3306]]
self.evaluate_output()
def test_get_aacgm_coord_arr_location_failure(self):
"""Test array AACGMV2 calculation with a bad location"""
self.out = aacgmv2.get_aacgm_coord_arr([0], [0], [0], self.dtime,
self.method)
np.testing.assert_equal(len(self.out), len(self.ref))
        assert all(isinstance(oo, np.ndarray) and len(oo) == 1 for oo in self.out)
assert np.any([np.isnan(oo) for oo in self.out])
def test_get_aacgm_coord_arr_mult_failure(self):
"""Test aacgm_coord_arr failure with multi-dim array input"""
with pytest.raises(ValueError):
(self.mlat_out, self.mlon_out,
self.mlt_out) = aacgmv2.get_aacgm_coord_arr(
np.array([[60, 61, 62], [63, 64, 65]]), 0, 300, self.dtime)
def test_get_aacgm_coord_arr_time_failure(self):
"""Test array AACGMV2 calculation with a bad time"""
with pytest.raises(ValueError):
aacgmv2.get_aacgm_coord_arr(self.lat_in, self.lon_in, self.alt_in,
None, self.method)
def test_get_aacgm_coord_arr_mlat_failure(self):
"""Test error return for co-latitudes above 90 for an array"""
self.lat_in = [91, 60, -91]
with pytest.raises(ValueError):
self.out = aacgmv2.get_aacgm_coord_arr(self.lat_in, self.lon_in[0],
self.alt_in[0], self.dtime,
self.method)
def test_get_aacgm_coord_arr_datetime_date(self):
"""Test array AACGMV2 calculation with date and datetime input"""
self.out = aacgmv2.get_aacgm_coord_arr(self.lat_in, self.lon_in,
self.alt_in, self.ddate,
self.method)
self.ref = aacgmv2.get_aacgm_coord_arr(self.lat_in, self.lon_in,
self.alt_in, self.dtime,
self.method)
self.evaluate_output()
def test_get_aacgm_coord_arr_maxalt_failure(self):
"""test aacgm_coord_arr failure for an altitude too high for coeff"""
self.method = ""
self.alt_in = [2001 for ll in self.lat_in]
self.out = aacgmv2.get_aacgm_coord_arr(self.lat_in, self.lon_in,
self.alt_in, self.dtime,
self.method)
np.testing.assert_equal(len(self.out), len(self.ref))
        assert all(isinstance(oo, np.ndarray) and len(oo) == len(self.lat_in)
                   for oo in self.out)
assert np.all(np.isnan(np.array(self.out)))
class TestConvertCode:
def setup(self):
self.c_method_code = None
self.ref_code = None
self.out = None
def teardown(self):
del self.c_method_code, self.ref_code, self.out
def set_c_code(self):
""" Utility test to get desired C method code"""
if self.ref_code is not None:
self.ref_code = self.ref_code.upper()
self.c_method_code = getattr(aacgmv2._aacgmv2, self.ref_code)
def set_bad_c_code(self):
""" Test failure to get bad code name"""
self.ref_code = "not_a_valid_code"
with pytest.raises(AttributeError):
self.set_c_code()
@pytest.mark.parametrize('method_code',
[('G2A'), ('A2G'), ('TRACE'), ('ALLOWTRACE'),
('BADIDEA'), ('GEOCENTRIC'), ('g2a')])
def test_standard_convert_str_to_bit(self, method_code):
"""Test conversion from string code to bit for standard cases"""
self.ref_code = method_code
self.set_c_code()
self.out = aacgmv2.convert_str_to_bit(method_code)
np.testing.assert_equal(self.out, self.c_method_code)
@pytest.mark.parametrize('str_code,bit_ref',
[("G2A | trace",
aacgmv2._aacgmv2.G2A + aacgmv2._aacgmv2.TRACE),
("ggoogg|", aacgmv2._aacgmv2.G2A)])
def test_non_standard_convert_str_to_bit(self, str_code, bit_ref):
"""Test conversion from string code to bit for non-standard cases"""
self.out = aacgmv2.convert_str_to_bit(str_code)
np.testing.assert_equal(self.out, bit_ref)
@pytest.mark.parametrize('bool_dict,method_code',
[({}, 'G2A'), ({'a2g': True}, 'A2G'),
({'trace': True}, 'TRACE'),
({'allowtrace': True}, 'ALLOWTRACE'),
({'badidea': True}, 'BADIDEA'),
({'geocentric': True}, 'GEOCENTRIC')])
def test_convert_bool_to_bit(self, bool_dict, method_code):
"""Test conversion from Boolean code to bit"""
self.ref_code = method_code
self.set_c_code()
self.out = aacgmv2.convert_bool_to_bit(**bool_dict)
np.testing.assert_equal(self.out, self.c_method_code)
class TestMLTConvert:
def setup(self):
"""Runs before every method to create a clean testing setup"""
self.dtime = dt.datetime(2015, 1, 1, 0, 0, 0)
self.dtime2 = dt.datetime(2015, 1, 1, 10, 0, 0)
self.ddate = dt.date(2015, 1, 1)
self.mlon_out = None
self.mlt_out = None
self.mlt_diff = None
self.mlon_list = [270.0, 80.0, -95.0]
self.mlt_list = [12.0, 25.0, -1.0]
self.mlon_comp = [-101.670617955439, 93.329382044561, 63.329382044561]
self.mlt_comp = [12.7780412, 0.11137453, 12.44470786]
self.diff_comp = np.ones(shape=(3,)) * -10.52411552
def teardown(self):
"""Runs after every method to clean up previous testing"""
del self.mlon_out, self.mlt_out, self.mlt_list, self.mlon_list
del self.mlon_comp, self.mlt_comp, self.mlt_diff, self.diff_comp
def test_date_input(self):
"""Test to see that the date input works"""
self.mlt_out = aacgmv2.convert_mlt(self.mlon_list, self.ddate,
m2a=False)
np.testing.assert_allclose(self.mlt_out, self.mlt_comp, rtol=1.0e-4)
def test_datetime_exception(self):
"""Test to see that a value error is raised with bad time input"""
with pytest.raises(ValueError):
self.mlt_out = aacgmv2.wrapper.convert_mlt(self.mlon_list, 1997)
def test_inv_convert_mlt_single(self):
"""Test MLT inversion for a single value"""
for i, mlt in enumerate(self.mlt_list):
self.mlon_out = aacgmv2.convert_mlt(mlt, self.dtime, m2a=True)
np.testing.assert_almost_equal(self.mlon_out, self.mlon_comp[i],
decimal=4)
def test_inv_convert_mlt_list(self):
"""Test MLT inversion for a list"""
self.mlon_out = aacgmv2.convert_mlt(self.mlt_list, self.dtime, m2a=True)
np.testing.assert_allclose(self.mlon_out, self.mlon_comp, rtol=1.0e-4)
def test_inv_convert_mlt_arr(self):
"""Test MLT inversion for an array"""
self.mlon_out = aacgmv2.convert_mlt(np.array(self.mlt_list), self.dtime,
m2a=True)
np.testing.assert_allclose(self.mlon_out, self.mlon_comp, rtol=1.0e-4)
def test_inv_convert_mlt_wrapping(self):
"""Test MLT wrapping"""
self.mlon_out = aacgmv2.convert_mlt(np.array([1, 25, -1, 23]),
self.dtime, m2a=True)
np.testing.assert_almost_equal(self.mlon_out[0], self.mlon_out[1],
decimal=6)
np.testing.assert_almost_equal(self.mlon_out[2], self.mlon_out[3],
decimal=6)
def test_mlt_convert_mlon_wrapping(self):
"""Test mlon wrapping"""
self.mlt_out = aacgmv2.convert_mlt(np.array([270, -90, 1, 361]),
self.dtime, m2a=False)
np.testing.assert_almost_equal(self.mlt_out[0], self.mlt_out[1],
decimal=6)
np.testing.assert_almost_equal(self.mlt_out[2], self.mlt_out[3],
decimal=6)
def test_mlt_convert_single(self):
"""Test MLT calculation for a single value"""
for i, mlon in enumerate(self.mlon_list):
self.mlt_out = aacgmv2.convert_mlt(mlon, self.dtime, m2a=False)
np.testing.assert_almost_equal(self.mlt_out, self.mlt_comp[i],
decimal=4)
def test_mlt_convert_list(self):
"""Test MLT calculation for a list"""
self.mlt_out = aacgmv2.convert_mlt(self.mlon_list, self.dtime,
m2a=False)
np.testing.assert_allclose(self.mlt_out, self.mlt_comp, rtol=1.0e-4)
def test_mlt_convert_arr(self):
"""Test MLT calculation for an array"""
self.mlt_out = aacgmv2.convert_mlt(np.array(self.mlon_list),
self.dtime, m2a=False)
np.testing.assert_allclose(self.mlt_out, self.mlt_comp, rtol=1.0e-4)
def test_mlt_convert_list_w_times(self):
"""Test MLT calculation for data and time arrays"""
self.dtime = [self.dtime for dd in self.mlon_list]
self.mlt_out = aacgmv2.convert_mlt(self.mlon_list, self.dtime,
m2a=False)
np.testing.assert_allclose(self.mlt_out, self.mlt_comp, rtol=1.0e-4)
def test_mlt_convert_change(self):
"""Test that MLT changes with UT"""
self.mlt_out = aacgmv2.convert_mlt(self.mlon_list, self.dtime)
self.mlt_diff = np.array(self.mlt_out) \
- np.array(aacgmv2.convert_mlt(self.mlon_list, self.dtime2))
        np.testing.assert_allclose(self.mlt_diff, self.diff_comp, rtol=1.0e-4)
from __future__ import print_function
import numpy as np
import pytest
EPS = 1e-8
def kmeans_cluster(x, k, max_iter=10, threshold=1e-3, verbose=False):
# init
centers = np.zeros([k, x.shape[-1]])
for i in range(k):
total_num = len(x)
        chosen_num = max(1, total_num // k)  # integer count for np.random.choice
        random_ids = np.random.choice(total_num, chosen_num, replace=False)
        centers[i, :] = np.mean(x[random_ids], axis=0)
    cur_total_dist = float('inf')
dist = np.zeros([k, len(x)])
for i in range(max_iter):
for j in range(k):
for m, p in enumerate(x):
dist[j, m] = np.mean((p - centers[j]) ** 2)
        min_idx = np.argmin(dist, 0)
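        # A minimal completion sketch, assuming the usual k-means update (move
        # each center to the mean of its assigned points) and a convergence
        # check against `threshold`; the returned values are an assumption.
        total_dist = 0.0
        for j in range(k):
            assigned = x[min_idx == j]
            if len(assigned) > 0:
                centers[j, :] = assigned.mean(axis=0)
            total_dist += dist[j, min_idx == j].sum()
        if verbose:
            print('iter {}: total distance {:.6f}'.format(i, total_dist))
        if cur_total_dist - total_dist < threshold:
            break
        cur_total_dist = total_dist
    return centers, min_idx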
# -*- coding: utf-8 -*-
"""
This module contains functions relating to more advanced binning techniques,
in particular recursive binning algorithms.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from tact import util
def _meets_num_threshold(xw, cat, s_num_thresh=1, b_num_thresh=1,
s_err_thresh=0.3, b_err_thresh=0.3):
"""
Check if the number of signal and background events in xw are above the
specified number threshold and above the specified error threshold.
Parameters
----------
xw : array-like, shape=N
Event weights.
cat : 1D array, shape=N
Array containing labels describing whether an entry is signal (1 or
True) or background (0 or False).
s_num_thresh, b_num_thresh, float, optional
Signal and background event count thresholds.
s_num_thresh, b_num_thresh, float, optional
Signal and background event bin error thresholds.
Returns
-------
bool
True if meets either threshold, False otherwise.
"""
sums = xw[cat == 1].sum()
sumb = xw[cat == 0].sum()
    return sumb < b_num_thresh or sums < s_num_thresh \
        or (xw[cat == 1] ** 2).sum() ** 0.5 / sums > s_err_thresh \
        or (xw[cat == 0] ** 2).sum() ** 0.5 / sumb > b_err_thresh
def _recursive_median_tree(x, cat, xw=None, s_num_thresh=1, b_num_thresh=1,
s_err_thresh=0.3, b_err_thresh=0.3):
"""
Perform binning by recursively finding the median.
The provided data is split at the median. The resulting subsamples are
continually split until doing so would result in a sample with less than
s_num_thresh (b_num_thresh) signal (background) events or a % bin error
greater than s_err_thresh in signal or b_err_thresh in background.
Parameters
----------
x : array-like, shape=N
Data to be binned.
cat : 1D array, shape=N
Array containing labels describing whether an entry is signal (1 or
True) or background (0 or False).
xw : array-like, shape=N, optional
Weights for samples in x. If None, equal weights are used.
s_num_thresh, b_num_thresh, float, optional
Minimum number of samples in a cluster in signal or background before
splitting is stopped.
s_err_thresh, b_err_thresh, float, optional
Maximum percentage error in a cluster in signal or background before
splitting is stopped.
Returns
-------
BinaryTree
BinaryTree containing subsample medians.
"""
if xw is None:
xw = np.ones(len(x))
    median = np.median(x)
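    # A minimal completion sketch: stop if splitting at the median would leave
    # either half below the count/error thresholds, otherwise recurse on the
    # two halves. The util.BinaryTree interface used here (val/left/right
    # attributes) is an assumption about the package, not confirmed code.
    below = x < median
    above = ~below
    if _meets_num_threshold(xw[below], cat[below], s_num_thresh, b_num_thresh,
                            s_err_thresh, b_err_thresh) \
            or _meets_num_threshold(xw[above], cat[above], s_num_thresh,
                                    b_num_thresh, s_err_thresh, b_err_thresh):
        return None
    tree = util.BinaryTree()
    tree.val = median
    tree.left = _recursive_median_tree(x[below], cat[below], xw[below],
                                       s_num_thresh, b_num_thresh,
                                       s_err_thresh, b_err_thresh)
    tree.right = _recursive_median_tree(x[above], cat[above], xw[above],
                                        s_num_thresh, b_num_thresh,
                                        s_err_thresh, b_err_thresh)
    return tree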
#!/usr/bin/python
"""
Perform HI survey Fisher forecast based on Pedro's formalism (see notes from
August 2013).
Requires up-to-date NumPy, SciPy (tested with version 0.11.0) and matplotlib.
A number of functions can optionally use MPI (mpi4py).
(<NAME> & <NAME>, 2013--2014)
"""
import numpy as np
import scipy.integrate
import scipy.interpolate
from scipy.misc import derivative
import pylab as P
import matplotlib.patches
import matplotlib.cm
from .units import *
import uuid, os, sys, copy
from hashlib import md5
from . import camb_wrapper as camb
from . import mg_growth
from tempfile import gettempdir
# No. of samples in log space in each dimension. 300 seems stable for (AA).
NSAMP_K = 500 # 1000
NSAMP_U = 1500 # 3000
# Debug settings (set all to False for normal operation)
DBG_PLOT_CUMUL_INTEGRAND = False # Plot k-space integrand of the dP/P integral
INF_NOISE = 1e200 # Very large finite no. used to denote infinite noise
EXP_OVERFLOW_VAL = 250. # Max. value of exponent for np.exp() before assuming overflow
# Decide which RSD function to use (N.B. interpretation of sigma_NL changes
# slightly depending on option)
RSD_FUNCTION = 'kaiser'
#RSD_FUNCTION = 'loeb'
# Location of CAMB fiducial P(k) file
# NOTE: Currently expects CAMB P(k) needs to be at chosen z value (z=0 here).
CAMB_KMAX = 20. / 0.7 # Max. k for CAMB, in h Mpc^-1
CAMB_EXEC = "/home/phil/lynx/oslo/bao21cm/camb" # Directory containing camb executable
################################################################################
# Plotting functions
################################################################################
def figure_of_merit(p1, p2, F, cov=None, twosigma=False):
"""
DETF Figure of Merit, defined as the area inside the 95% contour of w0,wa.
fom = 1 / [ sqrt( |cov(w0, wa)| ) ], where cov = F^-1, and cov(w0, wa)
is the w0,wa 2x2 sub-matrix of the covmat.
If twosigma=True, there is an additional factor of 1/4 that comes from
looking at the 95% (2-sigma) contours.
"""
    if cov is None: cov = np.linalg.inv(F)
# Calculate determinant
c11 = cov[p1,p1]
c22 = cov[p2,p2]
c12 = cov[p1,p2]
det = c11*c22 - c12**2.
fom = 1. / np.sqrt(det)
if twosigma: fom *= 0.25
return fom
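def _example_figure_of_merit():
    """A minimal sketch with a toy 2x2 Fisher matrix in which parameters 0 and
    1 play the role of (w0, wa); the numbers are arbitrary."""
    F_toy = np.array([[40., -5.],
                      [-5.,  2.]])
    return figure_of_merit(0, 1, F_toy)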
def ellipse_for_fisher_params(p1, p2, F, Finv=None):
"""
Return covariance ellipse parameters (width, height, angle) from
Fisher matrix F, for parameters in the matrix with indices p1 and p2.
See arXiv:0906.4123 for expressions.
"""
if Finv is not None:
cov = Finv
else:
cov = np.linalg.inv(F)
c11 = cov[p1,p1]
c22 = cov[p2,p2]
c12 = cov[p1,p2]
# Calculate ellipse parameters (Eqs. 2-4 of Coe, arXiv:0906.4123)
y1 = 0.5*(c11 + c22)
y2 = np.sqrt( 0.25*(c11 - c22)**2. + c12**2. )
a = 2. * np.sqrt(y1 + y2) # Factor of 2 because def. is *total* width of ellipse
b = 2. * np.sqrt(y1 - y2)
# Flip major/minor axis depending on which parameter is dominant
if c11 >= c22:
w = a; h = b
else:
w = b; h = a
# Handle c11==c22 case for angle calculation
if c11 != c22:
        ang = 0.5 * np.arctan(2. * c12 / (c11 - c22))
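    else:
        # A minimal completion sketch: in the degenerate c11 == c22 case the
        # rotation is +/-45 degrees (or 0 if the covariance term vanishes).
        ang = 0.25 * np.pi * np.sign(c12) if c12 != 0. else 0.
    # Width, height and rotation angle in degrees, as used by
    # matplotlib.patches.Ellipse (an assumption about the intended return).
    return w, h, np.degrees(ang)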
import numpy as np
import pytest
# pylint: disable=line-too-long
from tensornetwork.block_sparse.charge import BaseCharge, intersect, fuse_ndarrays, U1Charge, fuse_degeneracies, fuse_charges
def test_BaseCharge_charges():
D = 100
B = 6
np.random.seed(10)
charges = np.random.randint(-B // 2, B // 2 + 1, (2, D)).astype(np.int16)
q1 = BaseCharge(charges)
np.testing.assert_allclose(q1.charges, charges)
def test_BaseCharge_generic():
D = 300
B = 5
np.random.seed(10)
q = np.random.randint(-B // 2, B // 2 + 1, (2, D)).astype(np.int16)
unique = np.unique(q, axis=1)
Q = BaseCharge(charges=q)
assert Q.dim == 300
assert Q.num_symmetries == 2
assert Q.num_unique == unique.shape[1]
def test_BaseCharge_len():
D = 300
B = 5
np.random.seed(10)
q = np.random.randint(-B // 2, B // 2 + 1, (2, D)).astype(np.int16)
Q = BaseCharge(charges=q)
assert len(Q) == 300
def test_BaseCharge_copy():
D = 300
B = 5
np.random.seed(10)
q = np.random.randint(-B // 2, B // 2 + 1, (2, D)).astype(np.int16)
Q = BaseCharge(charges=q)
Qcopy = Q.copy()
assert Q.charge_labels is not Qcopy.charge_labels
assert Q.unique_charges is not Qcopy.unique_charges
np.testing.assert_allclose(Q.charge_labels, Qcopy.charge_labels)
np.testing.assert_allclose(Q.unique_charges, Qcopy.unique_charges)
def test_BaseCharge_unique():
D = 3000
B = 5
np.random.seed(10)
q = np.random.randint(-B // 2, B // 2 + 1, (2, D)).astype(np.int16)
Q = BaseCharge(charges=q, charge_types=[U1Charge, U1Charge])
expected = np.unique(
q, return_index=True, return_inverse=True, return_counts=True, axis=1)
actual = Q.unique(return_index=True, return_inverse=True, return_counts=True)
assert np.all(actual[0].charges == expected[0])
assert np.all(actual[1] == expected[1])
assert np.all(actual[2] == expected[2])
assert np.all(actual[3] == expected[3])
def test_BaseCharge_unique_sort():
np.random.seed(10)
unique = np.array([1, 0, -1])
labels = np.random.randint(0, 3, 100)
Q = U1Charge(charges=unique, charge_labels=labels)
actual = Q.unique(
return_index=True, return_inverse=True, return_counts=True, sort=False)
np.testing.assert_allclose(actual[0].unique_charges, [[1, 0, -1]])
def test_intersect_1():
a = np.array([[0, 1, 2], [2, 3, 4]])
b = np.array([[0, -2, 6], [2, 3, 4]])
out = intersect(a, b, axis=1)
np.testing.assert_allclose(np.array([[0], [2]]), out)
def test_intersect_2():
a = np.array([[0, 1, 2], [2, 3, 4]])
b = np.array([[0, -2, 6, 2], [2, 3, 4, 4]])
out, la, lb = intersect(a, b, axis=1, return_indices=True)
np.testing.assert_allclose(np.array([[0, 2], [2, 4]]), out)
np.testing.assert_allclose(la, [0, 2])
np.testing.assert_allclose(lb, [0, 3])
def test_intersect_3():
a = np.array([0, 1, 2, 3, 4])
b = np.array([0, -1, 4])
out = intersect(a, b)
np.testing.assert_allclose([0, 4], out)
def test_intersect_4():
a = np.array([0, 1, 2, 3, 4])
b = np.array([0, -1, 4])
out, la, lb = intersect(a, b, return_indices=True)
np.testing.assert_allclose([0, 4], out)
np.testing.assert_allclose(la, [0, 4])
np.testing.assert_allclose(lb, [0, 2])
def test_intersect_raises():
np.random.seed(10)
a = np.random.randint(0, 10, (4, 5))
b = np.random.randint(0, 10, (4, 6))
with pytest.raises(ValueError):
intersect(a, b, axis=0)
c = np.random.randint(0, 10, (3, 7))
with pytest.raises(ValueError):
intersect(a, c, axis=1)
with pytest.raises(NotImplementedError):
intersect(a, c, axis=2)
d = np.random.randint(0, 10, (3, 7, 3))
e = np.random.randint(0, 10, (3, 7, 3))
with pytest.raises(NotImplementedError):
intersect(d, e, axis=1)
def test_fuse_ndarrays():
d1 = np.asarray([0, 1])
d2 = np.asarray([2, 3, 4])
fused = fuse_ndarrays([d1, d2])
np.testing.assert_allclose(fused, [2, 3, 4, 3, 4, 5])
def test_fuse_degeneracies():
d1 = np.asarray([0, 1])
d2 = np.asarray([2, 3, 4])
fused_degeneracies = fuse_degeneracies(d1, d2)
np.testing.assert_allclose(fused_degeneracies, np.kron(d1, d2))
def test_U1Charge_charges():
D = 100
B = 6
np.random.seed(10)
charges = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16)
q1 = U1Charge(charges)
assert np.all(q1.charges == charges)
def test_U1Charge_dual():
D = 100
B = 6
np.random.seed(10)
charges = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16)
q1 = U1Charge(charges)
assert np.all(q1.dual(True).charges == -charges)
def get_charges(B0, B1, D, num_charges):
return [
np.random.randint(B0, B1 + 1, D).astype(np.int16)
for _ in range(num_charges)
]
def fuse_many_charges(num_charges,
num_charge_types,
seed,
D,
B,
use_flows=False):
np.random.seed(seed)
if use_flows:
flows = np.random.choice([True, False], num_charges, replace=True)
else:
flows = np.asarray([False] * num_charges)
np_flows = np.ones(num_charges, dtype=np.int16)
np_flows[flows] = -1
charges = [
get_charges(-B // 2, B // 2, D, num_charge_types)
for _ in range(num_charges)
]
fused = [
fuse_ndarrays([charges[n][m] * np_flows[n]
for n in range(num_charges)])
for m in range(num_charge_types)
]
final_charges = [U1Charge(charges[n][0]) for n in range(num_charges)]
for n in range(num_charges):
for m in range(1, num_charge_types):
final_charges[n] = final_charges[n] @ U1Charge(charges[n][m])
np_target_charges = np.random.randint(-B, B, num_charge_types, dtype=np.int16)
target_charges = [
U1Charge(np.array([np_target_charges[n]]))
for n in range(num_charge_types)
]
target = target_charges[0]
for m in range(1, num_charge_types):
target = target @ target_charges[m]
final = final_charges[0] * flows[0]
for n in range(1, num_charges):
final = final + final_charges[n] * flows[n]
nz_1 = np.nonzero(final == target)[0]
masks = [fused[m] == target.charges[m, 0] for m in range(num_charge_types)]
#pylint: disable=no-member
nz_2 = np.nonzero(np.logical_and.reduce(masks))[0]
return nz_1, nz_2
@pytest.mark.parametrize('use_flows', [True, False])
@pytest.mark.parametrize('num_charges, num_charge_types, D, B',
[(2, 1, 1000, 6), (2, 2, 1000, 6), (3, 1, 100, 6),
(3, 2, 100, 6), (3, 3, 100, 6)])
def test_U1Charge_fusion(num_charges, num_charge_types, D, B, use_flows):
nz_1, nz_2 = fuse_many_charges(
num_charges=num_charges,
num_charge_types=num_charge_types,
seed=20,
D=D,
B=B,
use_flows=use_flows)
assert len(nz_1) > 0
assert len(nz_2) > 0
assert np.all(nz_1 == nz_2)
def test_BaseCharge_intersect():
q1 = np.array([[0, 1, 2, 0, 6], [2, 3, 4, -1, 4]])
q2 = np.array([[0, -2, 6], [2, 3, 4]])
Q1 = BaseCharge(charges=q1)
Q2 = BaseCharge(charges=q2)
res = Q1.intersect(Q2)
np.testing.assert_allclose(res.charges, np.asarray([[0, 6], [2, 4]]))
def test_BaseCharge_intersect_2():
c1 = U1Charge(np.array([1, 0, -1]), charge_labels=np.array([2, 0, 1]))
c2 = U1Charge(np.array([-1, 0, 1]))
res = c1.intersect(c2)
np.testing.assert_allclose(res.charges, [[-1, 0, 1]])
def test_BaseCharge_intersect_return_indices():
q1 = np.array([[0, 1, 2, 0, 6], [2, 3, 4, -1, 4]])
q2 = np.array([[-2, 0, 6], [3, 2, 4]])
Q1 = BaseCharge(charges=q1)
Q2 = BaseCharge(charges=q2)
res, i1, i2 = Q1.intersect(Q2, return_indices=True)
#res, i1, i2 = intersect(q1, q2, axis=1, return_indices=True)
np.testing.assert_allclose(res.charges, np.asarray([[0, 6], [2, 4]]))
np.testing.assert_allclose(i1, [0, 4])
np.testing.assert_allclose(i2, [1, 2])
def test_U1Charge_matmul():
D = 1000
B = 5
np.random.seed(10)
C1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16)
C2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16)
C3 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16)
q1 = U1Charge(C1)
q2 = U1Charge(C2)
q3 = U1Charge(C3)
Q = q1 @ q2 @ q3
Q_ = BaseCharge(
np.stack([C1, C2, C3], axis=0),
charge_labels=None,
charge_types=[U1Charge, U1Charge, U1Charge])
assert np.all(Q.charges == Q_.charges)
def test_U1Charge_matmul_raises():
B = 5
np.random.seed(10)
C1 = np.random.randint(-B // 2, B // 2 + 1, 10).astype(np.int16)
C2 = np.random.randint(-B // 2, B // 2 + 1, 11).astype(np.int16)
q1 = U1Charge(C1)
q2 = U1Charge(C2)
with pytest.raises(ValueError):
q1 @ q2
def test_U1Charge_identity():
D = 100
B = 5
np.random.seed(10)
C1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16)
C2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16)
C3 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16)
q1 = U1Charge(C1)
q2 = U1Charge(C2)
q3 = U1Charge(C3)
Q = q1 @ q2 @ q3
eye = Q.identity_charges
np.testing.assert_allclose(eye.unique_charges, 0)
assert eye.num_symmetries == 3
def test_U1Charge_mul():
D = 100
B = 5
np.random.seed(10)
C1 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16)
C2 = np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16)
q1 = U1Charge(C1)
q2 = U1Charge(C2)
q = q1 @ q2
res = q * True
np.testing.assert_allclose(res.charges, (-1) * np.stack([C1, C2]))
def test_fuse_charges():
num_charges = 5
B = 6
D = 10
np_charges = [
np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16)
for _ in range(num_charges)
]
charges = [U1Charge(c) for c in np_charges]
flows = [True, False, True, False, True]
np_flows = np.ones(5, dtype=np.int16)
np_flows[flows] = -1
fused = fuse_charges(charges, flows)
np_fused = fuse_ndarrays([c * f for c, f in zip(np_charges, np_flows)])
np.testing.assert_allclose(np.squeeze(fused.charges), np_fused)
def test_fuse_charges_raises():
num_charges = 5
B = 6
D = 10
np_charges = [
np.random.randint(-B // 2, B // 2 + 1, D).astype(np.int16)
for _ in range(num_charges)
]
charges = [U1Charge(c) for c in np_charges]
flows = [True, False, True, False]
with pytest.raises(ValueError):
fuse_charges(charges, flows)
def test_reduce():
q = np.array([[0, 1, 2, 0, 6, 1, -9, 0, -7], [2, 3, 4, -1, 4, 3, 1, 2, 0]])
Q = BaseCharge(charges=q)
target_charge = np.array([[0, 1, 6, -12], [2, 3, 4, 16]])
expected = np.array([[0, 1, 6, 1, 0], [2, 3, 4, 3, 2]])
res, locs = Q.reduce(target_charge, return_locations=True)
np.testing.assert_allclose(res.charges, expected)
np.testing.assert_allclose(locs, [0, 1, 4, 5, 7])
def test_getitem():
q1 = np.array([0, 1, 2, 0, 6, 1, -9, 0, -7])
q2 = np.array([2, 3, 4, -1, 4, 3, 1, 2, 0])
Q1 = U1Charge(charges=q1)
Q2 = U1Charge(charges=q2)
Q = Q1 @ Q2
t1 = Q[5]
np.testing.assert_allclose(t1.charges, [[1], [3]])
assert np.all([t1.charge_types[n] == U1Charge for n in range(2)])
t2 = Q[[2, 5, 7]]
assert np.all([t2.charge_types[n] == U1Charge for n in range(2)])
np.testing.assert_allclose(t2.charges, [[2, 1, 0], [4, 3, 2]])
t3 = Q[[5, 2, 7]]
assert np.all([t3.charge_types[n] == U1Charge for n in range(2)])
np.testing.assert_allclose(t3.charges, [[1, 2, 0], [3, 4, 2]])
def test_isin():
np.random.seed(10)
c1 = U1Charge(np.random.randint(-5, 5, 1000, dtype=np.int16))
c2 = U1Charge(np.random.randint(-5, 5, 1000, dtype=np.int16))
c = c1 @ c2
c3 = np.array([[-1, 0, 1], [-1, 0, 1]])
n = c.isin(c3)
for m in np.nonzero(n)[0]:
charges = c[m].charges
#pylint: disable=unsubscriptable-object
assert np.any(
[np.array_equal(charges[:, 0], c3[:, k]) for k in range(c3.shape[1])])
for m in np.nonzero(np.logical_not(n))[0]:
charges = c[m].charges
#pylint: disable=unsubscriptable-object
assert not np.any(
[np.array_equal(charges[:, 0], c3[:, k]) for k in range(c3.shape[1])])
def test_isin_2():
np.random.seed(10)
c1 = U1Charge(np.random.randint(-5, 5, 1000, dtype=np.int16))
c2 = U1Charge(np.random.randint(-5, 5, 1000, dtype=np.int16))
c = c1 @ c2
c3 = U1Charge(np.array([-1, 0, 1])) @ U1Charge(np.array([-1, 0, 1]))
n = c.isin(c3)
for m in np.nonzero(n)[0]:
charges = c[m].charges
assert np.any([
np.array_equal(charges[:, 0], c3.charges[:, k])
for k in range(c3.charges.shape[1])
])
for m in np.nonzero( | np.logical_not(n) | numpy.logical_not |
from astropy.cosmology import WMAP7 as cosmo
from astropy.constants import c as C
from astropy import units as u
from astropy.table import Table
from astropy.io import ascii
import numpy as np
from linetools import utils as ltu
from linetools.isgm.abscomponent import AbsComponent
from linetools.spectra.io import readspec
from linetools.spectra.xspectrum1d import XSpectrum1D
import linetools.isgm.io as ltiio
from linetools.isgm import utils as ltiu
from pyntejos.io import table_from_marzfile
import json
import glob
import matplotlib.pyplot as plt
"""Module for utils"""
def get_closest_ind(array, value):
"""Gets the index of array such that array[ind] is the
closest element to given value"""
ind = np.argmin(np.fabs(value - array))
return ind
def get_closest_inds(array, values):
"""Gets the indices of array such that array[inds] give the closest
elements to given values, respectively. Note: there could come
with duplication dependind on the `values` array."""
inds = [get_closest_ind(array, value) for value in values]
return inds
def compare_z(z1, z2, dv):
"""Return true if the difference between z1 and z2 is within dv at
the mean redshift. Otherwise return False. dv has to be much
smaller and speed of light as the function does not account for
relativity."""
z1 = np.array(z1)
z2 = np.array(z2)
dv = np.array(dv)
dz = np.fabs(z1 - z2)
z = np.mean([z1, z2])
if dz / (1. + z) < dv / C.to('km/s').value:
return True
else:
return False
def group_z(z, dv=1000):
"""Group redshifts within dv (km/s) at the redshift of the
group. Returns an array of id_membership, of the same dimension
than z"""
z_original = np.array(z)
z = np.array(z)
z.sort()
ids = np.zeros(len(z)) # This is the output, here we store the id_membership of each z
ids_aux = np.zeros(len(z)) # This is the same, but matching a sorted z array
q = 0 # counter for groups
ids[0] = q
for i in np.arange(1, len(z)):
if compare_z(z[i], z[i - 1], dv):
ids_aux[i] = q
else:
q += 1
ids_aux[i] = q
# remap ids_aux to ids
for i in np.arange(len(z_original)):
cond = z_original[i] == z
if np.sum(cond) > 1: # in case there are 2 or more with same z
ids[i] = ids_aux[cond][0]
else:
ids[i] = ids_aux[cond]
return ids.astype(int)
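# Example usage (illustrative; the redshifts below are invented): entries closer
# than dv (km/s) at their mean redshift end up with the same group id.
if __name__ == '__main__':
    print(group_z([0.500, 0.5005, 0.800, 0.5002], dv=1000))  # -> [0 0 1 0]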
def poisson_err(n):
"""Gets poissonian error analytical approximation from equations of
Gherels 1986. Returns the upper and lower uncertainties"""
n = np.array(n)
errp = np.sqrt(n + 0.75) + 1.
errp = np.where(errp == errp, errp, errp)
errm = np.where(n > 0.25, np.sqrt(n - 0.25), 0)
return errp, errm
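# Example (illustrative): Gehrels-style upper/lower uncertainties for small counts.
if __name__ == '__main__':
    errp, errm = poisson_err([1, 4])
    print(errp, errm)  # errp ~ [2.32 3.18], errm ~ [0.87 1.94]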
def Nmin(e, dz, s, a, a_err):
"""Estimates the minimum number of independent structures
to detect a difference in dN/dz w/r to a field value given
by dNdz|field = a +- a_err, at a statistical significance s,
using a redshift path of dz per structure"""
e = np.array(e).astype(float)
dz = np.array(dz).astype(float)
s = np.array(s).astype(float)
a = np.array(a).astype(float)
a_err = np.array(a_err).astype(float)
    # this analytical expression was derived by N.T.
return (e / dz / a) * (s ** 2) / ((e - 1.) - s * a_err / a) ** 2
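# Worked example (illustrative numbers only): detecting a factor-of-2 excess (e=2)
# over a field value a = 1.0 +- 0.1, with dz = 0.1 of redshift path per structure,
# at 3-sigma significance (s=3) requires roughly 367 independent structures.
if __name__ == '__main__':
    print(Nmin(e=2, dz=0.1, s=3, a=1.0, a_err=0.1))  # ~367.3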
def find_edges(a):
"""Assume a is 1-D array of values, where 0 mean masked out. This
function will provide the indices of lower and upper edges of
chunks having values different than 0. Useful for spectra
analyses"""
a = np.array(a)
# append 0 before and after the original array to an auxiliary array
# in this way:
# lower case limits are always 0,something
# upper case limits are always something,0
a_aux = np.append(0, a)
a_aux = np.append(a_aux, 0)
lower = []
upper = []
for i in range(1, len(a_aux) - 1): # only for indices with original info
if (a_aux[i] != 0) and (a_aux[i - 1] == 0): # lower case
lower += [i]
if (a_aux[i] != 0) and (a_aux[i + 1] == 0): # upper case
upper += [i]
lower = np.array(lower)
upper = np.array(upper)
# lower and upper have indices of a_aux
# we substract one to get the indices in the original array a
lower = lower - 1
upper = upper - 1
assert len(lower) == len(upper), 'Something is wrong with find_edges function. Debug code!'
return lower.astype(int), upper.astype(int)
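# Example (illustrative): edge indices of the non-zero chunks of a masked array.
if __name__ == '__main__':
    lo, up = find_edges([0, 1.2, 3.4, 0, 0, 5.6])
    print(lo, up)  # -> [1 5] [2 5]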
def is_local_minima(a):
"""For a given array a, it returns true for local minima"""
return ltu.is_local_minima(a)
def is_local_maxima(a):
"""For a given array a, returns true for local maxima"""
return ltu.is_local_maxima(a)
def associate_redshifts(z1, z2, dv):
"""Returns an array of same lenght as z1, with a 1 if z1 redshift
lie within dv from any of the redshifts given by the array z2;
otherwise it has a value of 0"""
z1 = np.array(z1)
z2 = np.array(z2)
association = np.zeros(len(z1))
for z in z2:
dv_aux = np.fabs(ltu.dv_from_z(z1, z))
association = np.where(dv_aux < dv, 1, association)
return association
def get_dv_closest_z(z1, z2, give_ids=False):
"""Returns an array of same lenght as z1, with the velocity difference
(in km/s) associates to the closest redshift in z2, at restframe
given by z2. (Using relativistic approximation for flat-space time;
see ltu.dv_from_z() function)
If give_ids is True , it also returns the indices of z2 where the
difference is minimum.
"""
z1 = np.array(z1)
z2 = np.array(z2)
dv = []
inds = []
for z in z1:
dv_aux = ltu.dv_from_z(z, z2)
# find minimum difference
cond = np.fabs(dv_aux) == np.min(np.fabs(dv_aux))
ind = np.where(cond)[0][0]
# append dv
dv += [dv_aux[ind]]
inds += [ind]
dv = np.array(dv)
if give_ids:
return dv, inds
else:
return dv
def clean_array(x, value=0):
"""Get rid of nan and inf in the array x, and replace them with the
given value."""
x = np.array(x)
cond = (np.isnan(x)) | (np.isinf(x))
x = np.where(cond, value, x)
return x
def get_original_indices(original, new):
"""For a pair of arrays containing the same information but sorted in a
different way, this function provides the indices associated to the
original array that make up the new array so
original[indices]=new.
Add check to make sore arrays contain exact same information
"""
original = np.array(original)
new = np.array(new)
# use np.argsort()
inds_orig_sorted = np.argsort(original)
inds_new_sorted = np.argsort(new)
# find indices such that original[indices] = new
indices_aux = | np.argsort(inds_new_sorted) | numpy.argsort |
# Copyright (c) 2020,
# ECOLE POLYTECHNIQUE FEDERALE DE LAUSANNE, Switzerland,
# Laboratoire de Traitement des Signaux 4 (LTS4).
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Author: <NAME> (rossi-mattia-at-gmail-com)
import numpy as np
import torch
from typing import Tuple, Union
def gauss_filter_1d(length: int, sigma: float) -> np.array:
"""It builds a 1D Gaussian filter.
Args:
length: number of filter taps.
sigma: standard deviation.
Returns:
A 1D Gaussian filter arranged as a `(length,)` array.
"""
# Check the filter length.
if (length % 2) == 0:
raise ValueError('The length of the filter must be odd.')
# Build the filter.
radius = int((length - 1) / 2.0)
x = np.arange(-radius, radius + 1, dtype=np.float32)
y = np.exp(- (x ** 2) / (2 * (sigma ** 2))) / (sigma * np.sqrt(2 * np.pi))
# Normalize the filter.
const = np.sum(y)
assert const != 0, 'The filter is zero everywhere.'
y = y / const
return y
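# Quick sanity check (illustrative): a 7-tap filter with sigma=1.5 is symmetric
# and normalized to unit sum.
if __name__ == '__main__':
    g = gauss_filter_1d(7, 1.5)
    print(np.isclose(g.sum(), 1.0), np.allclose(g, g[::-1]))  # -> True True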
def gauss_filter_deriv_1d(length: int, sigma: float) -> np.array:
"""It builds the derivative of a 1D Gaussian filter.
Args:
length: number of filter taps.
sigma: standard deviation.
Returns:
A 1D Gaussian filter derivative, arranged as a `(length,)` array.
"""
# Check the filter length.
if (length % 2) == 0:
raise ValueError('The length of the filter must be odd.')
# Build the filter.
radius = int((length - 1) / 2.0)
x = np.arange(-radius, radius + 1, dtype=np.float32)
y = gauss_filter_1d(length, sigma) * (- x / (sigma ** 2))
# Normalize the filter.
const = np.sum(np.abs(y))
assert const != 0, 'The filter is zero everywhere.'
y = y / const
# TODO: check whether this normalization makes sense.
return y
def gauss_filter_2d(size: int, sigma: float) -> np.array:
"""It builds a 2D Gaussian filter.
Args:
size: height (and width) of the filter.
sigma: standard deviation (in pixels) of the Gaussian filter.
Returns:
A 2D Gaussian filter arranged as a `(size, size)` array.
"""
# Build the filter.
y = (gauss_filter_1d(size, sigma)[:, None]).dot(gauss_filter_1d(size, sigma)[None, :])
# Normalize the filter.
const = np.sum(y)
assert const != 0, 'The filter is zero everywhere.'
y = y / const
return y
def gauss_filter_deriv_2d(size: int, sigma: float) -> np.array:
"""It builds the vertical derivative of a 2D Gaussian filter.
It builds the vertical derivative of a 2D Gaussian filter. The horizontal derivative can be obtained just by taking
the transpose of the vertical one.
Args:
size: height (and width) of the filters.
sigma: standard deviation (in pixels) of the Gaussian filter underneath the derivative filters.
Returns:
The vertical derivative of a 2D Gaussian filter arranged as a `(size, size)` array.
"""
# Build the filter.
y = (gauss_filter_deriv_1d(size, sigma)[:, None]).dot(gauss_filter_1d(size, sigma)[None, :])
# Normalize the filter.
const = np.sum(np.abs(y))
assert const != 0, 'The filter is zero everywhere.'
y = y / const
return y
def gradient_filter(size: int, sigma: float) -> torch.Tensor:
"""It builds a gradient filter for images in PyTorch tensor format.
It builds a filter that can be used with `torch.nn.functional.conv2d` to compute the gradient of a batch of images
or, more in general, of maps. The images or maps must have only one channel.
The filter is arranged as a `(2, 1, H, W)` tensor with `[0, :, :, :]` and `[1, :, :, :]` the 2D horizontal and
vertical derivative filters.
Example:
batch_nb = 5
height = 100
width = 200
size = 7
image = torch.random(batch_nb, 1, height, width)
filter = gradient_filter(7, 0.1)
pad = tuple([int((size - 1) / 2)] * 4)
image_grad = torch.nn.functional.conv2d(torch.nn.functional.pad(image, pad, mode='replicate'), filter)
In the example, `image_grad` is a `(batch_nb, 2, height, width)` tensor with `image_grad[k, 0, :, :]` and
`image_grad[k, 1, :, :]` the horizontal and vertical derivatives of the image `k`.
Args:
size: height (and width) of the filters.
sigma: standard deviation (in pixels) of the Gaussian filter underneath the derivative filters.
Returns:
The gradient filter, arranged as a `(2, 1, H, W)` tensor.
"""
# Build the vertical (y) derivative filter.
d_gauss_dy = gauss_filter_deriv_2d(size, sigma)
# Flip the filter around the (x, y) origin, as torch.nn.functional.conv2d() performs just cross-correlation rather
# than the standard convolution.
d_gauss_dy = np.fliplr(d_gauss_dy)
d_gauss_dy = np.flipud(d_gauss_dy)
# Build the horizontal (x) derivative filter, which is just the transpose of the vertical one.
d_gauss_dx = d_gauss_dy.T
# Expand the filters to make them compliant with torch.nn.functional.conv2d.
d_gauss_dy = d_gauss_dy[None, None, :, :] # [1, 1, size, size]
d_gauss_dx = d_gauss_dx[None, None, :, :] # [1, 1, size, size]
# Concatenate the two filters into a single filter with two channels.
grad_filter = | np.concatenate((d_gauss_dx, d_gauss_dy), axis=0) | numpy.concatenate |
#python makeAllGraphs.py [cutoff value] [-w for weighted matrix, -uw for unweighted]
# [-nb for no biases, -b for biases] [optional filename prefix for saving]
import sys
import makeGraph
import makeGraphNoBias
import numpy as np
path = ""
'''
MAIN HAS BEEN MODIFIED TO RETURN THE MATRIX INSTEAD OF SAVING IT.
'''
def main(argv):
    assert len(argv) >= 6, 'ERROR: Please enter the required parameters. See readme.txt ...'
global path
#essential arguments
cutoff = float(argv[1])
saveName = str(argv[4])
path = argv[5]
matrix = None
#flags specifying format of matrix
if argv[2] == '-w':
if argv[3] == '-nb':
print('Creating weighted adjacency matrix without including the biases')
matrix = getWeightedAdjacencyMatrixNoBias()
elif argv[3] == '-b':
print('Creating weighted adjacency matrix, including the biases')
matrix = getWeightedAdjacencyMatrix()
else:
print('ERROR: incorrect flag. Please specify -b to include biases and -nb for no biases.')
elif argv[2] == '-uw':
if argv[3] == '-nb':
print('Creating graphs without including the biases')
matrix = getUnweightedAdjacencyMatrixNoBias(cutoff)
elif argv[3] == '-b':
print('Creating graphs including the biases')
matrix = getUnweightedAdjacencyMatrix(cutoff)
else:
print('ERROR: incorrect flag. Please specify -b to include biases and -nb for no biases.')
else:
print('ERROR: incorrect flag. Please specify -w for weighted and -uw for unweighted as the second parameter.')
#save output
#saveBinary(matrix, saveName)
'''
THIS NEEDS TO BE CHANGED BACK
'''
return matrix
def getUnweightedAdjacencyMatrixNoBias(cutoff):
w1 = np.load(path+'_W1.npy')
w2 = np.load(path+'_W2.npy')
G = makeGraphNoBias.makeGraph([w1,w2], cutoff)
print('Vertices:', len(G[0]),'\tEdges:',len(G[1]))
return graphToAdjacencyMatrix(G)
def getUnweightedAdjacencyMatrix(cutoff):
w1 = np.load(path+'_W1.npy')
w2 = np.load(path+'_W2.npy')
b1 = np.load(path+'_b1.npy')
b2 = np.load(path+'_b2.npy')
b1 = np.reshape(b1, (1, -1))
b2 = np.reshape(b2, (1, -1))
G = makeGraph.makeGraph([w1,w2],[b1,b2], cutoff)
print('Vertices:', len(G[0]),'\tEdges:',len(G[1]))
return graphToAdjacencyMatrix(G)
def getWeightedAdjacencyMatrix():
w1 = np.load(path+'_W1.npy')
w2 = np.load(path+'_W2.npy')
b1 = np.load(path+'_b1.npy')
b2 = np.load(path+'_b2.npy')
b1 = np.reshape(b1, (1, -1))
b2 = np.reshape(b2, (1, -1))
dim = w1.shape[0] + 1 + w2.shape[0] + 1 + w2.shape[1]
m = np.zeros((dim,dim))
inputLayerOffset = w1.shape[0] # +1 for bias layer and +1 for next starting point
hiddenLayerOffset = inputLayerOffset + 1 + w2.shape[0]
#place w1
placeSmallerInBiggerMatrix(0, inputLayerOffset + 1, w1,m)
#place b1
placeSmallerInBiggerMatrix(inputLayerOffset, inputLayerOffset + 1, b1,m)
#place w2
placeSmallerInBiggerMatrix(inputLayerOffset+1, hiddenLayerOffset + 1, w2, m)
#place b2
placeSmallerInBiggerMatrix(hiddenLayerOffset, hiddenLayerOffset + 1, b2, m)
assert check_symmetric(m)
return m
def getWeightedAdjacencyMatrixNoBias():
w1 = np.load(path+'_W7.npy')
w2 = | np.load(path+'_W8.npy') | numpy.load |
from __future__ import print_function
import os
import time
import cv2
import random
import mxnet as mx
import numpy as np
from six.moves import cPickle, range
from .module import MutableModule
from .helper import get_scale_factor
from ..config import config
from ..config import config as cfg
from ..io import image
from ..processing.bbox_transform import nonlinear_pred, clip_boxes
from ..processing.nms import py_nms_wrapper
import matplotlib.pyplot as plt
from keypoint_utils import decode_keypoint, get_skeletons, get_keypoint_wrt_box
bbox_pred = nonlinear_pred
class Predictor(object):
def __init__(self, symbol, data_names, label_names,
context=mx.cpu(), max_data_shapes=None,
provide_data=None, provide_label=None,
arg_params=None, aux_params=None):
self._mod = MutableModule(symbol, data_names, label_names,
context=context, max_data_shapes=max_data_shapes)
self._mod.bind(provide_data, provide_label, for_training=False)
self._mod.init_params(arg_params=arg_params, aux_params=aux_params)
def predict(self, data_batch):
self._mod.forward(data_batch)
return dict(zip(self._mod.output_names, self._mod.get_outputs()))
def im_proposal(predictor, data_batch, data_names, scale):
data_dict = dict(zip(data_names, data_batch.data))
output = predictor.predict(data_batch)
boxes = output['rpn_rois_output'].asnumpy()[:, :, 1:]
scores = output['rpn_scores_output'].asnumpy()[:, :, 0:1]
bs = boxes.shape[0]
box_score = np.concatenate([boxes, scores], axis=2).tolist()
for img_id in range(len(box_score)):
for box_id in range(len(box_score[img_id])):
box_score[img_id][box_id] = tuple(box_score[img_id][box_id])
for img_id in range(len(box_score)):
box_score[img_id] = list(set(box_score[img_id]))
box_score[img_id] = np.array(box_score[img_id])
scores = [single_img_box_score[:, 4] for single_img_box_score in box_score]
boxes = [single_img_box_score[:, :4] for single_img_box_score in box_score]
for i in range(len(scores)):
order = np.argsort(-scores[i])
boxes[i] = boxes[i][order]
scores[i] = scores[i][order]
for i in range(len(scores)):
boxes[i] = boxes[i][:config.TRAIN.PROPOSAL_POST_NMS_TOP_N]
scores[i] = scores[i][:config.TRAIN.PROPOSAL_POST_NMS_TOP_N][:, None]
for i in range(len(boxes)):
num_proposals = (boxes[i].shape)[0]
if num_proposals < config.TRAIN.PROPOSAL_POST_NMS_TOP_N:
pad_num = config.TRAIN.PROPOSAL_POST_NMS_TOP_N - num_proposals
for idx in range(pad_num):
rand_idx = np.random.randint(0, num_proposals)
boxes[i] = np.row_stack((boxes[i], boxes[i][rand_idx]))
scores[i] = np.row_stack((scores[i], scores[i][rand_idx]))
boxes = np.array(boxes)
scores = np.array(scores)
# transform to original scale
boxes = boxes / scale[:, None, None]
return scores, boxes, data_dict
def generate_proposals(predictor, test_data, imdb, vis=False, thresh=0.):
"""
    Generate detection results using RPN.
:param predictor: Predictor
:param test_data: data iterator, must be non-shuffled
:param imdb: image database
:param vis: controls visualization
:param thresh: thresh for valid detections
:return: list of detected boxes
"""
assert vis or not test_data.shuffle
data_names = [k[0] for k in test_data.provide_data]
i = 0
t = time.time()
imdb_boxes = list()
original_boxes = list()
for im_info, data_batch in test_data:
t1 = time.time() - t
t = time.time()
scale = im_info[:, 2]
scores, boxes, data_dict = im_proposal(predictor, data_batch, data_names, scale)
t2 = time.time() - t
t = time.time()
for ii in range(scores.shape[0] - data_batch.pad):
# assemble proposals
dets = np.concatenate((boxes[ii], scores[ii]), axis=-1)
original_boxes.append(dets)
# filter proposals
keep = np.where(dets[:, 4] > thresh)
dets = dets[keep]
imdb_boxes.append(dets)
if vis:
vis_all_detection(data_dict['data'].asnumpy(), [dets], ['obj'], scale)
i += 1
t3 = time.time() - t
t = time.time()
if i % 100 == 0:
print('generating %d/%d' % (i + 1, imdb.num_images) + ' proposal %d' % (dets.shape[0]) +
' data %.4fs net %.4fs post %.4fs' % (t1, t2, t3))
assert len(imdb_boxes) == imdb.num_images, 'calculations not complete'
return imdb_boxes, original_boxes
def im_detect_mask(predictor, data_batch, data_names, scale):
output = predictor.predict(data_batch)
data_dict = dict(zip(data_names, data_batch.data))
# ['mask_roi_score', 'mask_prob_output', 'mask_roi_pred_boxes']
if config.TEST.HAS_RPN:
pred_boxes = output['mask_roi_pred_boxes'].asnumpy()
scores = output['mask_roi_score'].asnumpy()
mask_outputs = output['mask_prob_output'].asnumpy()
mask_outputs = mask_outputs.reshape((data_batch.data[0].shape[0], -1) + mask_outputs.shape[1:])
else:
raise NotImplementedError
# we used scaled image & roi to train, so it is necessary to transform them back
if isinstance(scale, float) or isinstance(scale, int):
pred_boxes = pred_boxes / scale
elif isinstance(scale, np.ndarray):
pred_boxes = pred_boxes / scale[:, None, None]
return scores, pred_boxes, data_dict, mask_outputs
def im_detect_keypoint(predictor, data_batch, data_names, scale):
output = predictor.predict(data_batch)
# ['kp_prob_reshape_output', 'kp_roi_pred_boxes', 'kp_roi_score']
data_dict = dict(zip(data_names, data_batch.data))
if config.TEST.HAS_RPN:
pred_boxes = output['kp_roi_pred_boxes'].asnumpy()
scores = output['kp_roi_score'].asnumpy()
if config.KEYPOINT.USE_HEATMAP:
kp_outputs = output['kp_prob_output'].asnumpy()
kp_outputs = kp_outputs.reshape((data_batch.data[0].shape[0], -1) + kp_outputs.shape[1:])
else:
kp_outputs = output['kp_prob_reshape_output'].asnumpy()
else:
raise NotImplementedError
# we used scaled image & roi to train, so it is necessary to transform them back
if isinstance(scale, float) or isinstance(scale, int):
pred_boxes = pred_boxes / scale
elif isinstance(scale, np.ndarray):
pred_boxes = pred_boxes / scale[:, None, None]
return scores, pred_boxes, data_dict, kp_outputs
def pred_eval_mask(predictor, test_data, imdb, roidb, result_path, vis=False, thresh=1e-1):
"""
wrapper for calculating offline validation for faster data analysis
    in this example, all thresholds are set by hand
:param predictor: Predictor
:param test_data: data iterator, must be non-shuffle
:param imdb: image database
:param vis: controls visualization
:param thresh: valid detection threshold
:return:
"""
assert vis or not test_data.shuffle
data_names = [k[0] for k in test_data.provide_data]
nms = py_nms_wrapper(config.TEST.NMS)
num_images, num_classes = imdb.num_images, imdb.num_classes
results_list = []
all_boxes = [[None for _ in range(num_images)] for _ in range(num_classes)] # (#cls, #img)
all_masks = [[None for _ in range(num_images)] for _ in range(num_classes)] # (#cls, #img)
img_ind = 0
t = time.time()
SCORE_THRESH = 0.05
for im_infos, data_batch in test_data:
t1 = time.time() - t
t = time.time()
scales = im_infos[:, 2]
scores, boxes, data_dict, mask_outputs = im_detect_mask(predictor, data_batch, data_names, scales)
t2 = time.time() - t
t = time.time()
for i in range(data_batch.data[0].shape[0] - data_batch.pad):
score, box, mask_output, im_info = scores[i], boxes[i], mask_outputs[i], im_infos[i:i + 1]
roi_rec = roidb[img_ind]
label = | np.argmax(score, axis=1) | numpy.argmax |
# Reverse photography
##h3D-II sensor size
# 36 * 48 mm, 0.036 x 0.048m
## focal length
# 28mm, 0.028m
## multiplier
# 1.0
from skimage import io
import matplotlib.pyplot as plt
import numpy as np
import cv2
from scipy.spatial import distance
import shapefile as shp
def buildshape(corners, filename):
"""build a shapefile geometry from the vertices of the image in
world coordinates, then save it using the image name. Sub critical"""
#create a shapefile instance
#shape = shp.writer(shape.POLYGON)
#shape.poly(parts = [[proj_coords[:,0], proj_coords[:,1]], [proj_coords[:,1], proj_coords[:,2]]
# [proj_coords[:,3], proj_coords[:,2]], [proj_coords[:,0], proj_coords[:,3]]]
#shape.save("./", filename)
def worldfile(corners, im_pix, filename, filepath):
"""build a world file from the vertices of the image in
world coordinates, then save it using the image name.
here we build a small array and then dump it to a file
input is:
- the image file name
- projected corners in world coordinates (*not* bounding box)
- pxel resolution as a two-element vector [pix_x, pix_y]
- path to warped image files
reference:
http://support.esri.com/en/knowledgebase/techarticles/detail/17489
"""
world_arr = np.zeros([6,1])
#line 1 is the X pixel resolution in M
world_arr[0] = im_pix[0]
#line 2 is the Y pixel resolution in M
world_arr[3] = -im_pix[1]
#now the X coord of the top left corner
world_arr[4] = np.min(corners[0,:])
#and the Y coordinate of the top left corner
world_arr[5] = np.max(corners[1,:])
#strip some parts from the filename
filename = filename[0:len(filename)-4]
np.savetxt(filepath + filename + '.jpgw', world_arr, "%.3f")
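# Example (illustrative; the file name and corner coordinates are invented):
# a 0.1 m/pixel image with projected corners spanning x in [3050, 3100] and
# y in [10150, 10200] produces a .jpgw whose six lines are
# 0.100, 0.000, 0.000, -0.100, 3050.000, 10200.000
# (x pixel size, two rotation terms, negative y pixel size, top-left x, top-left y).
if __name__ == '__main__':
    import tempfile
    demo_corners = np.array([[3050., 3100., 3100., 3050.],
                             [10150., 10150., 10200., 10200.]])
    worldfile(demo_corners, [0.1, 0.1], 'IMG_0001.jpg', tempfile.mkdtemp() + '/')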
#------
# 2D homogeneous vectors and transformations
def hom2(x, y):
"""2D homogeneous column vector."""
return | np.matrix([x, y, 1]) | numpy.matrix |
import numpy as np
import pandas as pd
from pyswarm import pso
import scipy.optimize as optimize
from DolphinApi.config import *
from optimizers.tables import *
from optimizers.portfolio import *
def opti_min_func(weights, assets_id, return_matrix, cov_matrix, prices):
"""
Function to calculate Sharpe ratio
"""
true_w = np.round((weights * 100000000) / prices)
weights = [w / sum(true_w) for w in true_w]
weights = np.matrix(weights)
port_return = np.round(np.sum(weights * return_matrix.T) * 1274, 2)/5
port_volacity = np.round(
np.sqrt(weights * cov_matrix * weights.T) * np.sqrt(1274), 2)/ | np.sqrt(5) | numpy.sqrt |
import hoki.age_utils as au
import hoki.load as load
import pkg_resources
import numpy as np
import pandas as pd
import pytest
from hoki.utils.exceptions import HokiFatalError, HokiUserWarning, HokiFormatError
# Loading Data
data_path = pkg_resources.resource_filename('hoki', 'data')
hr_file = data_path + '/hrs-sin-imf_chab100.zem4.dat'
cmd_file = data_path + '/cmd_bv_z002_bin_imf135_300'
myhrd = load.model_output(hr_file, hr_type='TL')
mycmd = load.unpickle(cmd_file)
# Creating Test Inputs
fake_hrd_input = pd.DataFrame.from_dict({'name': ['star1', 'star2', 'star3'],
'logT': np.array([4.58, 4.48, 4.14]),
'logL': np.array([4.83, 5.07, 5.40])})
bad_hrd_input = pd.DataFrame.from_dict({'logT': np.array(['bla']),
'logL': np.array([4.83])})
no_name_input = pd.DataFrame.from_dict({'logT': np.array([4.58, 4.48, 4.14]),
'logL': np.array([4.83, 5.07, 5.40])})
bad_hrd_input2 = pd.DataFrame.from_dict({'logT': np.array([4.58, 'bla']),
'logL': np.array([4.83, 2.0])})
fake_cmd_input = pd.DataFrame.from_dict({'name': ['star1', 'star2', 'STAR3'],
'col': np.array([-0.3, 0.5, -0.25]),
'mag': np.array([-5, -10, -1])})
bad_cmd_input = pd.DataFrame.from_dict({'col': np.array(['bla']),
'mag': np.array([-5])})
# Testing Suite
class TestAgeWizard(object):
def test_init_basic(self):
assert au.AgeWizard(obs_df=fake_hrd_input, model=hr_file), "Loading HRD file path failed"
assert au.AgeWizard(obs_df=fake_hrd_input, model=myhrd), "Loading with hoki.hrdiagrams.HRDiagram failed"
assert au.AgeWizard(obs_df=fake_cmd_input, model=mycmd), 'Loading with hoki.cmd.CMD'
assert au.AgeWizard(obs_df=fake_cmd_input, model=cmd_file), 'Loading CMD from frile failed'
def test_bad_init(self):
with pytest.raises(HokiFatalError):
__, __ = au.AgeWizard(obs_df=fake_cmd_input, model='sdfghj'), 'HokiFatalError should be raised'
with pytest.raises(HokiFormatError):
__, __ = au.AgeWizard(obs_df='edrftgyhu', model=cmd_file), 'HokiFormatError should be raised'
def test_combine_pdfs_not_you(self):
wiz = au.AgeWizard(fake_hrd_input, myhrd)
wiz.calculate_sample_pdf(not_you=['star1'])
cpdf = wiz.sample_pdf.pdf
assert np.sum(np.isclose([cpdf[0], cpdf[9]], [0.0, 0.7231526323765232])) == 2, "combined pdf is not right"
def test_most_likely_age(self):
wiz = au.AgeWizard(obs_df=fake_hrd_input, model=hr_file)
assert np.isclose(wiz.most_likely_age[0], 6.9), "Most likely age wrong"
def test_most_likely_ages(self):
wiz = au.AgeWizard(obs_df=fake_hrd_input, model=hr_file)
a = wiz.most_likely_ages
assert np.sum(np.isclose([a[0], a[1], a[2]], [6.9, 6.9, 6.9])) == 3, "Most likely ages not right"
def test_combine_pdfs(self):
wiz = au.AgeWizard(fake_hrd_input, myhrd)
wiz.calculate_sample_pdf()
assert np.isclose(wiz.sample_pdf.pdf[9],0.551756734145878), "Something is wrong with the combined_Age PDF"
def test_calculate_p_given_age_range(self):
wiz = au.AgeWizard(fake_hrd_input, myhrd)
probas = wiz.calculate_p_given_age_range([6.7, 6.9])
assert np.sum(np.isclose([probas[0], probas[1], probas[2]],
[0.515233714952414, 0.7920611550946726, 0.6542441096583737])) == 3, \
"probability given age range is messed up"
class TestFindCoordinates(object):
def test_hrd_input(self):
T_coord, L_coord = au.find_coordinates(obs_df=fake_hrd_input, model=myhrd)
assert np.sum(
np.isclose([T_coord[0], T_coord[1], T_coord[2]], [45, 44, 40])) == 3, "Temperature coordinates wrong"
assert np.sum(
np.isclose([L_coord[0], L_coord[1], L_coord[2]], [77, 80, 83])) == 3, "Luminosity coordinates wrong"
def test_cmd_input(self):
col_coord, mag_range = au.find_coordinates(obs_df=fake_cmd_input, model=mycmd)
assert np.sum(
np.isclose([col_coord[0], col_coord[1], col_coord[2]], [27, 35, 27])) == 3, "color coordinates wrong"
assert np.sum(
np.isclose([mag_range[0], mag_range[1], mag_range[2]], [90, 40, 130])) == 3, "magnitude coordinates wrong"
class TestFindCMDCoordinates(object):
def test_fake_input(self):
col_coord, mag_range = au._find_cmd_coordinates(obs_df=fake_cmd_input, mycmd=mycmd)
assert np.sum(
np.isclose([col_coord[0], col_coord[1], col_coord[2]], [27, 35, 27])) == 3, "color coordinates wrong"
assert np.sum(
np.isclose([mag_range[0], mag_range[1], mag_range[2]], [90, 40, 130])) == 3, "magnitude coordinates wrong"
def test_bad_input(self):
with pytest.raises(HokiFormatError):
col_coord, mag_range = au._find_cmd_coordinates(obs_df=bad_hrd_input, mycmd=mycmd)
def test_bad_input_2(self):
col_coord, mag_range = au._find_cmd_coordinates(obs_df=bad_cmd_input, mycmd=mycmd)
#assert np.siz(col_coord[0]), "This should be a nan"
assert np.isclose(mag_range[0], 90), "This L coordinate is wrong - test_bad_input."
class TestFindHRDCoordinates(object):
def test_fake_input(self):
T_coord, L_coord = au._find_hrd_coordinates(obs_df=fake_hrd_input, myhrd=myhrd)
assert np.sum(
np.isclose([T_coord[0], T_coord[1], T_coord[2]], [45, 44, 40])) == 3, "Temperature coordinates wrong"
assert np.sum(
| np.isclose([L_coord[0], L_coord[1], L_coord[2]], [77, 80, 83]) | numpy.isclose |
import pandas as pd
import numpy as np
import pdb
import sys
sys.path.append('../../data')
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import GridSearchCV
import re
glm_all_f = pd.read_csv("../../results/glm_transfer/RMSE_transfer_glm_pball.csv")
train_df = pd.read_feather("../../results/transfer_learning/glm/glm_meta_train_rmses.feather")
train_lakes = [re.search('nhdhr_(.*)', x).group(1) for x in | np.unique(glm_all_f['target_id'].values) | numpy.unique |
import numpy as np
from baseDecomp import BaseDecomp
from decimal import *
import os, sys
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
from convexRegion.ellipse import Ellipsoid
from utils import vec2_to_rotation, vec3_to_rotation
epsilon_ = 1e-10
class Line():
def __init__(self, dim, p1, p2):
self.dim = dim
self.p1 = p1
self.p2 = p2
class LineSegment(BaseDecomp):
def __init__(self, dim, obs, local_bbox, p1, p2):
super().__init__(dim, obs, local_bbox)
self.p1 = p1
self.p2 = p2
def dilate(self, radius):
self.find_ellipsoid_2D(radius)
self.find_polyhedron()
self.add_local_bbox(self.polyhedron_)
def get_line_segment(self):
return Line(self.dim, self.p1, self.p2)
def add_local_bbox(self, polyhedron):
        # initially this is the size of the map
self.local_bbox_ = polyhedron
def find_ellipsoid_2D(self, offset):
f = Decimal(np.linalg.norm(self.p1-self.p2) / 2)
C = f * np.identity(2)
axes = np.array([f, f])
C[0][0] += offset
axes[0] += offset
if axes[0] > 0:
ratio = axes[1] / axes[0]
axes *= ratio
C *= ratio
Ri = vec2_to_rotation(self.p2 - self.p1)
C = Ri.dot(C).dot(Ri.T)
ellipsoid = Ellipsoid(2, C, (self.p1 + self.p2)/2)
obs = ellipsoid.points_inside(self.obs_)
obs_inside = obs
while len(obs_inside) > 0:
pw = ellipsoid.closest_point(obs_inside)
p = Ri.T.dot(pw - ellipsoid.d)
if p[0] < axes[0]:
axes[1]= np.abs(p[1]) / np.sqrt(1 - (p[0] / axes[0])**2)
new_C = np.identity(2)
new_C[0][0] = axes[0]
new_C[1][1] = axes[1]
ellipsoid.C = Ri.dot(new_C).dot(Ri.T)
obs_new = []
for item in obs_inside:
if 1 - ellipsoid.dist(item) > epsilon_:
obs_new.append(item)
obs_inside = obs_new
self.ellipsoid_ = ellipsoid
def find_ellipsoid_3D(self, offset):
f = Decimal(np.linalg.norm(self.p1 - self.p2) / 2)
C = f * np.identity(3)
axes = np.array([f, f, f])
C[0][0] += offset
axes[0] += offset
if axes[0] > 0:
ratio = axes[1] / axes[0]
axes *= ratio
C *= ratio
Ri = vec3_to_rotation(self.p2 - self.p1)
C = Ri.dot(C).dot(Ri.T)
ellipsoid = Ellipsoid(3, C, (self.p1 + self.p2) / 2)
Rf = Ri
obs = ellipsoid.points_inside(self.obs_)
obs_inside = obs
while len(obs_inside) > 0:
pw = ellipsoid.closest_point(obs_inside)
p = Ri.T.dot(pw - ellipsoid.d)
            roll = np.arctan2(p[2], p[1])
            # NOTE: Quatf (a quaternion constructor) is not defined in this module;
            # it appears to come from the original C++ implementation this was ported from.
            Rf = Ri * Quatf(np.cos(roll/2), np.sin(roll/2), 0, 0)
p = Rf.T.dot(pw - ellipsoid.d)
if p[0] < axes[0]:
axes[1] = np.abs(p[1]) / np.sqrt(1 - (p[0] / axes[0]) ** 2)
new_C = np.identity(3)
new_C[0][0] = axes[0]
new_C[1][1] = axes[1]
new_C[2][2] = axes[2]
ellipsoid.C = Rf.dot(new_C).dot(Rf.T)
obs_new = []
for item in obs_inside:
if 1 - ellipsoid.dist(item) > epsilon_:
obs_new.append(item)
obs_inside = obs_new
C = f * np.identity(3)
C[0][0] = axes[0]
C[1][1] = axes[1]
C[2][2] = axes[2]
ellipsoid.C = Rf.dot(C).dot(Rf.T)
obs_inside = ellipsoid.points_inside(obs)
while len(obs_inside) > 0:
pw = ellipsoid.closest_point(obs_inside)
p = Rf.T.dot(pw - ellipsoid.d)
dd = 1 - (p[0] / axes[0]) ** 2 - (p[1] / axes[1]) ** 2
if dd > epsilon_:
axes[2] = np.abs(p[2]) / np.sqrt(dd)
new_C = | np.identity(3) | numpy.identity |
from random import choice, random, sample
import numpy as np
import networkx as nx
from BanditAlg.BanditAlgorithms import ArmBaseStruct
class LinUCBUserStruct:
def __init__(self, featureDimension,lambda_, userID, RankoneInverse = False):
self.userID = userID
self.d = featureDimension
self.A = lambda_*np.identity(n = self.d)
self.b = np.zeros(self.d)
self.AInv = | np.linalg.inv(self.A) | numpy.linalg.inv |
import numpy as np
from scipy.constants import mu_0
# TODO: make this to take a vector rather than a single frequency
def rTEfunfwd(n_layer, f, lamda, sig, chi, depth, HalfSwitch):
"""
Compute reflection coefficients for Transverse Electric (TE) mode.
Only one for loop for multiple layers.
Parameters
----------
n_layer : int
The number layers
f : complex, ndarray
Frequency (Hz); size = (n_frequency x n_filter)
lamda : complex, ndarray
        Wavenumber (1/m); size = (n_frequency x n_filter)
    sig: complex, ndarray
        Conductivity (S/m); size = (n_layer x n_frequency x n_filter)
    chi: complex, ndarray
        Susceptibility (SI); size = (n_layer,)
    depth: float, ndarray
        Top boundary of the layers; size = (n_layer,)
HalfSwitch: bool
Switch for halfspace
Returns
-------
    rTE: complex, ndarray
        Reflection coefficients;
        size = (n_frequency x n_filter)
"""
n_frequency, n_filter = lamda.shape
Mtemp00 = np.zeros((n_frequency, n_filter), dtype=complex)
Mtemp10 = np.zeros((n_frequency, n_filter), dtype=complex)
Mtemp01 = np.zeros((n_frequency, n_filter), dtype=complex)
Mtemp11 = np.zeros((n_frequency, n_filter), dtype=complex)
M1sum00 = np.zeros((n_frequency, n_filter), dtype=complex)
M1sum10 = np.zeros((n_frequency, n_filter), dtype=complex)
M1sum01 = np.zeros((n_frequency, n_filter), dtype=complex)
M1sum11 = np.zeros((n_frequency, n_filter), dtype=complex)
thick = -np.diff(depth)
w = 2*np.pi*f
rTE = np.zeros((n_frequency, n_filter), dtype=complex)
utemp0 = np.zeros((n_frequency, n_filter), dtype=complex)
utemp1 = np.zeros((n_frequency, n_filter), dtype=complex)
const = np.zeros((n_frequency, n_filter), dtype=complex)
utemp0 = lamda
utemp1 = np.sqrt(lamda**2+1j*w*mu_0*(1+chi[0])*sig[0, :, :])
const = mu_0*utemp1/(mu_0*(1+chi[0])*utemp0)
Mtemp00 = 0.5*(1+const)
Mtemp10 = 0.5*(1-const)
Mtemp01 = 0.5*(1-const)
Mtemp11 = 0.5*(1+const)
    # maybe store these and reuse for sensitivity?
M00 = []
M10 = []
M01 = []
M11 = []
M0sum00 = Mtemp00
M0sum10 = Mtemp10
M0sum01 = Mtemp01
M0sum11 = Mtemp11
if HalfSwitch:
M1sum00 = np.zeros((n_frequency, n_filter), dtype=complex)
M1sum10 = np.zeros((n_frequency, n_filter), dtype=complex)
M1sum01 = np.zeros((n_frequency, n_filter), dtype=complex)
M1sum11 = np.zeros((n_frequency, n_filter), dtype=complex)
M1sum00 = M0sum00
M1sum10 = M0sum10
M1sum01 = M0sum01
M1sum11 = M0sum11
else:
for j in range(n_layer-1):
utemp0 = np.sqrt(lamda**2+1j*w*mu_0*(1+chi[j])*sig[j, :, :])
utemp1 = np.sqrt(lamda**2+1j*w*mu_0*(1+chi[j+1])*sig[j+1, :, :])
const = mu_0*(1+chi[j])*utemp1/(mu_0*(1+chi[j+1])*utemp0)
h0 = thick[j]
Mtemp00 = 0.5*(1.+const)*np.exp(-2.*utemp0*h0)
Mtemp10 = 0.5*(1.-const)
Mtemp01 = 0.5*(1.-const)*np.exp(-2.*utemp0*h0)
Mtemp11 = 0.5*(1.+const)
M1sum00 = M0sum00*Mtemp00 + M0sum01*Mtemp10
M1sum10 = M0sum10*Mtemp00 + M0sum11*Mtemp10
M1sum01 = M0sum00*Mtemp01 + M0sum01*Mtemp11
M1sum11 = M0sum10*Mtemp01 + M0sum11*Mtemp11
M0sum00 = M1sum00
M0sum10 = M1sum10
M0sum01 = M1sum01
M0sum11 = M1sum11
rTE = M1sum01/M1sum11
return rTE
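# Usage sketch (illustrative only: the conductivities, layer depths, frequency and
# wavenumber grid below are invented, not taken from any particular filter design).
if __name__ == '__main__':
    n_layer, n_frequency, n_filter = 2, 1, 201
    freq = np.full((n_frequency, n_filter), 30.)                      # 30 Hz
    lamda = np.tile(np.logspace(-3, 2, n_filter), (n_frequency, 1))   # wavenumbers (1/m)
    sig = np.array([1e-2, 1e-1])[:, None, None] * np.ones((n_layer, n_frequency, n_filter))
    chi = np.zeros(n_layer)
    depth = np.array([0., -20.])                                      # 20 m thick top layer
    rte = rTEfunfwd(n_layer, freq, lamda, sig, chi, depth, HalfSwitch=False)
    print(rte.shape)                                                  # (1, 201)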
def matmul(a00, a10, a01, a11, b00, b10, b01, b11):
"""
Compute 2x2 matrix mutiplication in vector way
C = A*B
C = [a00 a01] * [b00 b01] = [c00 c01]
[a10 a11] [b10 b11] [c10 c11]
"""
c00 = a00*b00 + a01*b10
c10 = a10*b00 + a11*b10
c01 = a00*b01 + a01*b11
c11 = a10*b01 + a11*b11
return c00, c10, c01, c11
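# Example (illustrative): the vectorized 2x2 product reproduces an ordinary
# matrix product when the entries are scalars (note the a00, a10, a01, a11 order).
if __name__ == '__main__':
    A = np.array([[1., 2.], [3., 4.]])
    B = np.array([[5., 6.], [7., 8.]])
    c00, c10, c01, c11 = matmul(A[0, 0], A[1, 0], A[0, 1], A[1, 1],
                                B[0, 0], B[1, 0], B[0, 1], B[1, 1])
    print(np.allclose([[c00, c01], [c10, c11]], A.dot(B)))  # -> True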
# TODO: make this to take a vector rather than a single frequency
def rTEfunjac(n_layer, f, lamda, sig, chi, depth, HalfSwitch):
"""
Compute sensitivity of reflection coefficients for
Transverse Electric (TE) mode with regard to conductivity
Parameters
----------
n_layer : int
The number layers
    f : complex, ndarray
        Frequency (Hz); size = (n_frequency x n_filter)
    lamda : complex, ndarray
        Wavenumber (1/m); size = (n_frequency x n_filter)
    sig: complex, ndarray
        Conductivity (S/m); size = (n_layer x 1)
    chi: complex, ndarray
        Susceptibility (SI); size = (n_layer x 1)
    depth: float, ndarray
        Top boundary of the layers
HalfSwitch: bool
Switch for halfspace
Returns
-------
    rTE: complex, ndarray
        Derivative of the reflection coefficients;
        size = (n_layer x n_frequency x n_filter)
"""
# Initializing arrays
n_frequency, n_filter = lamda.shape
Mtemp00 = np.zeros((n_frequency, n_filter), dtype=complex)
Mtemp10 = np.zeros((n_frequency, n_filter), dtype=complex)
Mtemp01 = np.zeros((n_frequency, n_filter), dtype=complex)
Mtemp11 = np.zeros((n_frequency, n_filter), dtype=complex)
M1sum00 = np.zeros((n_frequency, n_filter), dtype=complex)
M1sum10 = np.zeros((n_frequency, n_filter), dtype=complex)
M1sum01 = np.zeros((n_frequency, n_filter), dtype=complex)
M1sum11 = np.zeros((n_frequency, n_filter), dtype=complex)
M0sum00 = np.zeros((n_frequency, n_filter), dtype=complex)
M0sum10 = np.zeros((n_frequency, n_filter), dtype=complex)
M0sum01 = np.zeros((n_frequency, n_filter), dtype=complex)
M0sum11 = np.zeros((n_frequency, n_filter), dtype=complex)
dMtemp00 = np.zeros((n_frequency, n_filter), dtype=complex)
dMtemp10 = np.zeros((n_frequency, n_filter), dtype=complex)
dMtemp01 = np.zeros((n_frequency, n_filter), dtype=complex)
dMtemp11 = np.zeros((n_frequency, n_filter), dtype=complex)
dj0temp00 = np.zeros((n_frequency, n_filter), dtype=complex)
dj0temp10 = np.zeros((n_frequency, n_filter), dtype=complex)
dj0temp01 = np.zeros((n_frequency, n_filter), dtype=complex)
dj0temp11 = np.zeros((n_frequency, n_filter), dtype=complex)
dj1temp00 = np.zeros((n_frequency, n_filter), dtype=complex)
dj1temp10 = np.zeros((n_frequency, n_filter), dtype=complex)
dj1temp01 = np.zeros((n_frequency, n_filter), dtype=complex)
dj1temp11 = np.zeros((n_frequency, n_filter), dtype=complex)
thick = -np.diff(depth)
w = 2*np.pi*f
rTE = np.zeros((n_frequency, n_filter), dtype=complex)
drTE = np.zeros((n_layer, n_frequency, n_filter), dtype=complex)
utemp0 = np.zeros((n_frequency, n_filter), dtype=complex)
utemp1 = np.zeros((n_frequency, n_filter), dtype=complex)
const = np.zeros((n_frequency, n_filter), dtype=complex)
utemp0 = lamda
utemp1 = np.sqrt(lamda**2+1j*w*mu_0*(1+chi[0])*sig[0, :, :])
const = mu_0*utemp1/(mu_0*(1+chi[0])*utemp0)
# Compute M1
Mtemp00 = 0.5*(1+const)
Mtemp10 = 0.5*(1-const)
Mtemp01 = 0.5*(1-const)
Mtemp11 = 0.5*(1+const)
utemp0 = lamda
utemp1 = np.sqrt(lamda**2+1j*w*mu_0*(1+chi[0])*sig[0, :, :])
const = mu_0*utemp1/(mu_0*(1+chi[0])*utemp0)
# Compute dM1du1
dj0Mtemp00 = 0.5*(mu_0/(mu_0*(1+chi[0])*utemp0))
dj0Mtemp10 = -0.5*(mu_0/(mu_0*(1+chi[0])*utemp0))
dj0Mtemp01 = -0.5*(mu_0/(mu_0*(1+chi[0])*utemp0))
dj0Mtemp11 = 0.5*(mu_0/(mu_0*(1+chi[0])*utemp0))
# TODO: for computing Jacobian
M00 = []
M10 = []
M01 = []
M11 = []
dJ00 = []
dJ10 = []
dJ01 = []
dJ11 = []
M00.append(Mtemp00)
M01.append(Mtemp01)
M10.append(Mtemp10)
M11.append(Mtemp11)
M0sum00 = Mtemp00.copy()
M0sum10 = Mtemp10.copy()
M0sum01 = Mtemp01.copy()
M0sum11 = Mtemp11.copy()
if HalfSwitch or n_layer == 1:
M1sum00 = M0sum00.copy()
M1sum10 = M0sum10.copy()
M1sum01 = M0sum01.copy()
M1sum11 = M0sum11.copy()
else:
for j in range(n_layer-1):
dJ_10Mtemp00 = np.zeros((n_frequency, n_filter), dtype=complex)
dJ_10Mtemp10 = np.zeros((n_frequency, n_filter), dtype=complex)
dJ_10Mtemp01 = np.zeros((n_frequency, n_filter), dtype=complex)
dJ_10Mtemp11 = np.zeros((n_frequency, n_filter), dtype=complex)
dJ01Mtemp00 = np.zeros((n_frequency, n_filter), dtype=complex)
dJ01Mtemp10 = np.zeros((n_frequency, n_filter), dtype=complex)
dJ01Mtemp01 = | np.zeros((n_frequency, n_filter), dtype=complex) | numpy.zeros |
import bpy
import bmesh
import numpy as np
from mathutils import Vector
def find_first_view3d():
'''Helper function to find first space view 3d and associated window region.
The three returned objects are useful for setting up offscreen rendering in
Blender.
Returns
-------
area: object
Area associated with space view.
window: object
Window region associated with space view.
space: bpy.types.SpaceView3D
Space view.
'''
areas = [a for a in bpy.context.screen.areas if a.type == 'VIEW_3D']
assert len(areas) > 0
area = areas[0]
region = sorted([r for r in area.regions if r.type == 'WINDOW'], key=lambda x:x.width, reverse=True)[0]
spaces = [s for s in areas[0].spaces if s.type == 'VIEW_3D']
assert len(spaces) > 0
return area, spaces[0], region
def object_coordinates(*objs, depsgraph=None):
'''Returns XYZ object coordinates of all objects in positional *args.
Params
------
objs: list-like of bpy.types.Object
Object to return vertices for
depsgraph: bpy.types.Depsgraph, None
Dependency graph
Returns
-------
xyz: Nx3 array
World coordinates of object vertices
'''
# To be on the safe side, we use the evaluated object after
# all modifiers etc. applied (done internally by bmesh)
dg = depsgraph or bpy.context.evaluated_depsgraph_get()
xyz = []
for obj in objs:
eval_obj = obj.evaluated_get(dg)
xyz_obj = [v.co for v in eval_obj.data.vertices]
xyz.extend(xyz_obj)
return np.stack(xyz)
def world_coordinates(*objs, depsgraph=None):
'''Returns XYZ world coordinates of all objects in positional *args.
Params
------
objs: list-like of bpy.types.Object
Object to return vertices for
depsgraph: bpy.types.Depsgraph, None
Dependency graph
Returns
-------
xyz: Nx3 array
World coordinates of object vertices
'''
# To be on the safe side, we use the evaluated object after
# all modifiers etc. applied (done internally by bmesh)
dg = depsgraph or bpy.context.evaluated_depsgraph_get()
xyz = []
for obj in objs:
eval_obj = obj.evaluated_get(dg)
xyz_obj = [(eval_obj.matrix_world @ v.co) for v in eval_obj.data.vertices]
xyz.extend(xyz_obj)
return np.stack(xyz)
def bbox_world_coordinates(*objs, depsgraph=None):
'''Returns XYZ world coordinates of all bounding box corners of each object in *objs.
Params
------
objs: list-like of bpy.types.Object
Object to return vertices for
depsgraph: bpy.types.Depsgraph, None
Dependency graph
Returns
-------
xyz: Nx3 array
World coordinates of object vertices
'''
# To be on the safe side, we use the evaluated object after
# all modifiers etc. applied (done internally by bmesh)
dg = depsgraph or bpy.context.evaluated_depsgraph_get()
xyz = []
for obj in objs:
eval_obj = obj.evaluated_get(dg)
xyz_obj = [(eval_obj.matrix_world @ Vector(c)) for c in eval_obj.bound_box]
xyz.extend(xyz_obj)
return | np.stack(xyz) | numpy.stack |
import os
import numpy as np
import scipy.misc
from scipy.misc import imsave
from LRCN import load, videoRender
import dataset_list
datasets = dataset_list.datasets_LRCN
for num, dataset in enumerate(datasets):
indir = ''.join([os.path.basename(x)+'_' for x in dataset])[0:-1]
indir = os.path.join(os.path.dirname(dataset[0]), 'output', indir)
ds_name = os.path.basename(os.path.dirname(dataset[0]))
all_predictions = np.load(os.path.join(indir, 'all_prediction.npy'))
all_gt = np.load(os.path.join(indir, 'all_gt.npy'))
pcas = np.load(os.path.join(indir, 'pcas.npy'), encoding='latin1')
n_planes = 1
# load original data to render the uncompressed (pca) test images as movie
train, test = load.load_train_and_test_data(dataset)
# render the low-speed predictions (one for each test MR image)
for plane in range(n_planes):
pca = pcas[plane]
recon_test = pca.inverse_transform(all_predictions[plane])
recon_test = np.reshape(recon_test, (recon_test.shape[0], 192, 192))
test_gt = pca.inverse_transform(all_gt[plane])
test_gt = np.reshape(test_gt, (test_gt.shape[0], 192, 192))
recon_test[recon_test < 0] = 0
test_gt[test_gt < 0] = 0
test_gt[test_gt == np.inf] = 0
ma = np.max(test_gt) # normalize with the maximum of the ground-truth
recon_test = recon_test / (ma)
test_gt = test_gt / (ma)
test_gt *= 0.75 # matching KDE histogram profile as good as it gets
recon_test *= 0.75 # matching KDE histogram profile as good as it gets
# '3 - plane' equals the number of invalid MR frames at the beginning
# (3 for plane 1, 2 for plane 2)
test_gt = test['mri'][0][3-plane:3-plane+25, :, :]
test_gt[test_gt < 0] = 0
test_gt = test_gt / (ma) # Normalize ground-truth
diff = (recon_test - test_gt) / 2 + 0.5
comparison = np.concatenate((recon_test, test_gt, diff), axis=1)
comparison = np.concatenate((comparison, comparison, comparison),
axis=2)
text = "LRCN output Acquired Difference"
vid_fn = os.path.join(indir,
ds_name + '_comparison_plane{}_'.format(plane))
videoRender.render_movie(comparison, vid_fn + '.mp4', 0.6, text)
os.system('ffmpeg -y -i ' + vid_fn + '.mp4'
+ ' -filter:v \"crop=576:192:0:0\" ' + vid_fn[0:-1] + '.mp4')
print('Native speed comparison video saved to {}'.format(vid_fn))
idx_mid = int(test_gt.shape[1] / 2)
mmode_test_gt = test_gt[:, idx_mid, :]
mmode_recon_test = recon_test[:, idx_mid, :]
mmode_diff = mmode_recon_test - mmode_test_gt
divider = np.transpose(np.ndarray((2, mmode_test_gt.shape[1])))
divider.fill(np.max(mmode_test_gt))
mmode_comparison = np.concatenate((np.transpose(mmode_recon_test),
divider, np.transpose(mmode_test_gt),
divider, np.abs(np.transpose(
mmode_diff))), axis=1)
imsave(os.path.join(indir,
'mmode_test_gt_plane'+str(plane)+'.png'), mmode_test_gt)
imsave(os.path.join(indir,
'mmode_recon_test_plane'+str(plane)+'.png'), mmode_recon_test)
imsave(os.path.join(indir,
'mmode_diff_plane'+str(plane)+'.png'), mmode_diff)
imsave(os.path.join(indir,
'mmode_comparison_plane'+str(plane)+'.png'), mmode_comparison)
# pick a specific image, save reconstruction and ground-truth as image
example_image_gt = np.transpose(np.squeeze(test_gt[15, :, :]))
example_image_rec = np.transpose(np.squeeze(
recon_test[15, :, :]))
imsave(os.path.join(indir,
'example_image_gt_plane'+str(plane)+'.png'), example_image_gt)
imsave(os.path.join(indir,
'example_image_rec_plane'+str(plane)+'.png'), example_image_rec)
imsave(os.path.join(indir,
'example_image_diff_plane'+str(plane)+'.png'),
np.abs(example_image_rec-example_image_gt))
print('M-mode images and example images saved to {}'.format(indir))
# render high-speed predictions (at the speed of OCM)
highspeed_prediction = np.load(os.path.join(indir,
'highspeed_prediction.npy'))
for plane in range(n_planes):
# make high speed prediction movie
pca = pcas[plane]
# Reconstruct 5000 images using inverse PCA
highspeed_prediction_ = pca.inverse_transform(
highspeed_prediction[plane][0:5000, :])
highspeed_prediction_ = np.reshape(highspeed_prediction_,
(highspeed_prediction_.shape[0],
192, 192))
m = comparison.mean()
std = comparison.std()
highspeed_prediction_
highspeed_prediction_[highspeed_prediction_ < m - 2*std] = np.inf
highspeed_prediction_ = highspeed_prediction_ \
- | np.min(highspeed_prediction_) | numpy.min |
import torch
import numpy as np
from hparams import create_hparams as hps
def mode(obj, model = False):
if model and hps.is_cuda:
obj = obj.cuda()
elif hps.is_cuda:
obj = obj.cuda(non_blocking = hps.pin_mem)
return obj
def to_arr(var):
return var.cpu().detach().numpy().astype(np.float32)
def get_mask_from_lengths(lengths, pad = False):
max_len = torch.max(lengths).item()
if pad and max_len%hps.n_frames_per_step != 0:
max_len += hps.n_frames_per_step - max_len%hps.n_frames_per_step
assert max_len%hps.n_frames_per_step == 0
ids = torch.arange(0, max_len, out = torch.LongTensor(max_len))
ids = mode(ids)
mask = (ids < lengths.unsqueeze(1))
return mask
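# Example (illustrative sketch; assumes the hparams configuration resolves to a
# CPU run, i.e. hps.is_cuda is False, so mode() leaves tensors where they are).
if __name__ == '__main__':
    lengths = mode(torch.LongTensor([3, 5]))
    mask = get_mask_from_lengths(lengths)
    print(mask.shape)  # (2, 5): first row has three True entries, second row five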
import scipy
import librosa
import librosa.filters
import numpy as np
from scipy.io import wavfile
from hparams import hparams as hps
def load_wav(path):
sr, wav = wavfile.read(path)
wav = wav.astype(np.float32)
wav = wav / np.max(np.abs(wav))
try:
assert sr == hps.sample_rate
except:
print('Error:', path, 'has wrong sample rate.')
return wav
def save_wav(wav, path):
wav *= 32767 / max(0.01, np.max(np.abs(wav)))
wavfile.write(path, hps.sample_rate, wav.astype(np.int16))
def preemphasis(x):
return scipy.signal.lfilter([1, -hps.preemphasis], [1], x)
def inv_preemphasis(x):
return scipy.signal.lfilter([1], [1, -hps.preemphasis], x)
def spectrogram(y):
D = _stft(preemphasis(y))
S = _amp_to_db(np.abs(D)) - hps.ref_level_db
return _normalize(S)
def inv_spectrogram(spectrogram):
'''Converts spectrogram to waveform using librosa'''
S = _db_to_amp(_denormalize(spectrogram) + hps.ref_level_db) # Convert back to linear
return inv_preemphasis(_griffin_lim(S ** hps.power)) # Reconstruct phase
def melspectrogram(y):
D = _stft(preemphasis(y))
S = _amp_to_db(_linear_to_mel(np.abs(D))) - hps.ref_level_db
return _normalize(S)
def inv_melspectrogram(spectrogram):
mel = _db_to_amp(_denormalize(spectrogram) + hps.ref_level_db)
S = _mel_to_linear(mel)
return inv_preemphasis(_griffin_lim(S ** hps.power))
def find_endpoint(wav, threshold_db=-40, min_silence_sec=0.8):
window_length = int(hps.sample_rate * min_silence_sec)
hop_length = int(window_length / 4)
threshold = _db_to_amp(threshold_db)
for x in range(hop_length, len(wav) - window_length, hop_length):
if np.max(wav[x:x + window_length]) < threshold:
return x + hop_length
return len(wav)
def _griffin_lim(S):
'''librosa implementation of Griffin-Lim
Based on https://github.com/librosa/librosa/issues/434
'''
angles = np.exp(2j * np.pi * np.random.rand(*S.shape))
    S_complex = np.abs(S).astype(complex)
y = _istft(S_complex * angles)
for i in range(hps.gl_iters):
angles = np.exp(1j * np.angle(_stft(y)))
y = _istft(S_complex * angles)
return y
def _stft(y):
n_fft, hop_length, win_length = _stft_parameters()
return librosa.stft(y=y, n_fft=n_fft, hop_length=hop_length, win_length=win_length)
def _istft(y):
_, hop_length, win_length = _stft_parameters()
return librosa.istft(y, hop_length=hop_length, win_length=win_length)
def _stft_parameters():
return (hps.num_freq - 1) * 2, hps.frame_shift, hps.frame_length
# Conversions:
_mel_basis = None
def _linear_to_mel(spectrogram):
global _mel_basis
if _mel_basis is None:
_mel_basis = _build_mel_basis()
    return np.dot(_mel_basis, spectrogram)
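# Hedged usage sketch (not part of the original file): a typical round trip through the helpers
# above, assuming hps provides sample_rate, preemphasis, ref_level_db, power, gl_iters and the
# STFT settings consumed by _stft_parameters(). The file names are hypothetical.
#
#   >>> wav = load_wav('example.wav')      # normalized float32 waveform
#   >>> mel = melspectrogram(wav)          # (n_mels, n_frames) normalized log-mel spectrogram
#   >>> rec = inv_melspectrogram(mel)      # approximate waveform via Griffin-Lim phase recovery
#   >>> save_wav(rec, 'reconstructed.wav')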
import d4rl
import click
import h5py
import os
import gym
import numpy as np
import pickle
from mjrl.utils.gym_env import GymEnv
DESC = """
Helper script to visualize policy (in mjrl format).\n
USAGE:\n
Visualizes policy on the env\n
$ python utils/visualize_policy --env_name relocate-v0 --policy policies/relocate-v0.pickle --mode evaluation\n
"""
# MAIN =========================================================
@click.command(help=DESC)
@click.option("--env_name", type=str, help="environment to load", required=True)
# @click.option('--policy', type=str, help='absolute path of the policy file', required=True)
@click.option("--num_trajs", type=int, help="Num trajectories", default=5000)
@click.option(
"--mode",
type=str,
help="exploration or evaluation mode for policy",
default="evaluation",
)
def main(env_name, mode, num_trajs, clip=True):
e = GymEnv(env_name)
policy = "./policies/" + env_name + ".pickle"
pi = pickle.load(open(policy, "rb"))
# render policy
pol_playback(env_name, pi, num_trajs, clip=clip)
def pol_playback(env_name, pi, num_trajs=100, clip=True):
e = gym.make(env_name)
e.reset()
obs_ = []
act_ = []
rew_ = []
term_ = []
info_qpos_ = []
info_qvel_ = []
info_mean_ = []
info_logstd_ = []
ravg = []
for n in range(num_trajs):
e.reset()
returns = 0
for t in range(e._max_episode_steps):
obs = e.get_obs()
obs_.append(obs)
info_qpos_.append(e.env.data.qpos.ravel().copy())
info_qvel_.append(e.env.data.qvel.ravel().copy())
action, infos = pi.get_action(obs)
action = pi.get_action(obs)[0] # eval
if clip:
action = np.clip(action, -1, 1)
act_.append(action)
info_mean_.append(infos["mean"])
info_logstd_.append(infos["log_std"])
_, rew, _, info = e.step(action)
returns += rew
rew_.append(rew)
done = False
if t == (e._max_episode_steps - 1):
done = True
term_.append(done)
# e.env.mj_render() # this is much faster
# e.render()
ravg.append(returns)
print(n, returns, np.mean(ravg))
# write out hdf5 file
obs_ = np.array(obs_).astype(np.float32)
act_ = np.array(act_).astype(np.float32)
rew_ = np.array(rew_).astype(np.float32)
term_ = np.array(term_).astype(np.bool_)
    info_qpos_ = np.array(info_qpos_)
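# Hedged sketch (not in the original script): once the lists above are converted to arrays inside
# pol_playback, a D4RL-style dictionary could be written out with h5py. The key names and file
# name below follow the usual D4RL convention but are assumptions, not the script's actual output.
#
#   >>> dataset = {'observations': obs_, 'actions': act_, 'rewards': rew_, 'terminals': term_}
#   >>> with h5py.File(env_name + '_demos.hdf5', 'w') as f:
#   ...     for k, v in dataset.items():
#   ...         f.create_dataset(k, data=v, compression='gzip')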
# Copyright (c) 2016 by <NAME> and the other collaborators on GitHub at
# https://github.com/rmjarvis/Piff All rights reserved.
#
# Piff is free software: Redistribution and use in source and binary forms
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
"""
.. module:: interp
"""
from __future__ import print_function
import numpy as np
import scipy.linalg
import galsim
import warnings
from .interp import Interp
from .star import Star, StarFit
class BasisInterp(Interp):
"""An Interp class that works whenever the interpolating functions are
linear sums of basis functions. Does things the "slow way" to be stable to
degenerate fits to individual stars, instead of fitting to parameter sets
produced by single stars.
First time coding this we will assume that each element of the PSF parameter
vector p is a linear combination of the same set of basis functions across the
focal plane,
p_i = \sum_{j} q_{ij} K_j(u,v,other stellar params).
The property degenerate_points is set to True to indicate that this interpolator
uses the alpha/beta quadratic form of chisq for each sample, rather than assuming
that a best-fit parameter vector is available at every sample.
Internally we'll store the interpolation coefficients in a 2d array of dimensions
(nparams, nbases)
Note: This is an abstract base class. The concrete class you probably want to use
is BasisPolynomial.
"""
def __init__(self):
self.degenerate_points = True # This Interpolator uses chisq quadratic forms
self.q = None
def initialize(self, stars, logger=None):
"""Initialize both the interpolator to some state prefatory to any solve iterations and
initialize the stars for use with this interpolator.
This class will initialize everything to have constant PSF parameter vector taken
from the first Star in the list.
:param stars: A list of Star instances to use to initialize.
:param logger: A logger object for logging debug info. [default: None]
:returns: A new list of Stars which have their parameters initialized.
"""
c = stars[0].fit.params.copy()
self.q = c[:,np.newaxis] * self.constant(1.)[np.newaxis,:]
stars = self.interpolateList(stars)
return stars
def basis(self, star):
"""Return 1d array of polynomial basis values for this star
:param star: A Star instance
:returns: 1d numpy array with values of u^i v^j for 0<i+j<=order
"""
raise NotImplementedError("Cannot call `basis` for abstract base class BasisInterp. "
"You probably want to use BasisPolynomial.")
def constant(self, value=1.):
"""Return 1d array of coefficients that represent a polynomial with constant value.
:param value: The value to use as the constant term. [default: 1.]
:returns: 1d numpy array with values of u^i v^j for 0<i+j<=order
"""
raise NotImplementedError("Cannot call `constant` for abstract base class BasisInterp. "
"You probably want to use BasisPolynomial.")
def solve(self, stars, logger=None):
"""Solve for the interpolation coefficients given some data.
The StarFit element of each Star in the list is assumed to hold valid
alpha and beta members specifying depending of chisq on differential
changes to its parameter vector.
:param stars: A list of Star instances to interpolate between
:param logger: A logger object for logging debug info. [default: None]
"""
logger = galsim.config.LoggerWrapper(logger)
if self.q is None:
raise RuntimeError("Attempt to solve() before initialize() of BasisInterp")
# Empty A and B
A = np.zeros( self.q.shape+self.q.shape, dtype=float)
B = np.zeros_like(self.q)
for s in stars:
# Get the basis function values at this star
K = self.basis(s)
# Sum contributions into A, B
B += s.fit.beta[:,np.newaxis] * K
tmp = s.fit.alpha[:,:,np.newaxis] * K
A += K[np.newaxis,:,np.newaxis,np.newaxis] * tmp[:,np.newaxis,:,:]
# Reshape to have single axis for all q's
B = B.flatten()
nq = B.shape[0]
A = A.reshape(nq,nq)
logger.debug('Beginning solution of matrix size %d',A.shape[0])
# cf. comments in pixelgrid.py about this function in scipy 1.0.0
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
logger.info('A.shape = %s',A.shape)
logger.info('B.shape = %s',B.shape)
dq = scipy.linalg.solve(A, B, assume_a='pos', check_finite=False)
if len(w) > 0:
logger.warning('Caught %s',w[0].message)
logger.debug('norm(A dq - B) = %s',scipy.linalg.norm(A.dot(dq) - B))
logger.debug('norm(dq) = %s',scipy.linalg.norm(dq))
if False:
logger.warning('Switching to svd solution')
Sd,U = scipy.linalg.eigh(A)
nsvd = np.sum(np.abs(Sd) > 1.e-15 * np.abs(Sd[-1]))
logger.info('2-condition is %e',np.abs(Sd[-1]/Sd[0]))
logger.info('nsvd = %d of %d',nsvd,len(Sd))
# Note: unlike scipy.linalg.svd, the Sd here is in *ascending* order, not descending.
Sd[-nsvd:] = 1./Sd[-nsvd:]
Sd[:-nsvd] = 0.
S = np.diag(Sd)
dq = U.dot(S.dot(U.T.dot(B)))
logger.info('norm(A dq - B) = %s',scipy.linalg.norm(A.dot(dq) - B))
logger.info('norm(dq) = %s',scipy.linalg.norm(dq))
logger.info('norm(q) = %s',scipy.linalg.norm(self.q))
logger.debug('...finished solution')
self.q += dq.reshape(self.q.shape)
def interpolate(self, star, logger=None):
"""Perform the interpolation to find the interpolated parameter vector at some position.
:param star: A Star instance to which one wants to interpolate
:param logger: A logger object for logging debug info. [default: None]
:returns: a new Star instance holding the interpolated parameters
"""
if self.q is None:
raise RuntimeError("Attempt to interpolate() before initialize() of BasisInterp")
K = self.basis(star)
p = np.dot(self.q,K)
fit = star.fit.newParams(p)
return Star(star.data, fit)
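# Hedged illustration (not part of Piff): interpolate() above is just a matrix-vector product
# between the coefficient array q (nparams x nbases) and the basis vector K evaluated at the
# star, i.e. p_i = sum_j q_ij K_j. A standalone sketch with made-up shapes:
#
#   >>> q = np.random.rand(3, 6)    # 3 PSF parameters, 6 basis functions
#   >>> K = np.random.rand(6)       # basis values K_j(u, v, ...) at one star
#   >>> p = np.dot(q, K)            # interpolated parameter vector, shape (3,)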
class BasisPolynomial(BasisInterp):
"""A version of the Polynomial interpolator that works with BasisModels and can use the
quadratic form of the chisq information it calculates. It works better than the regular
Polynomial interpolator when there is missing or degenerate information.
The order is the highest power of a key to be used. This can be the same for all keys
or you may provide a list of separate order values to be used for each key. (e.g. you
may want to use 2nd order in the positions, but only 1st order in the color).
All combinations of powers of keys that have total order <= max_order are used.
The maximum order is normally the maximum order of any given key's order, but you may
specify a larger value. (e.g. to use 1, x, y, xy, you would specify order=1, max_order=2.)
:param order: The order to use for each key. Can be a single value (applied to all
keys) or an array matching number of keys.
:param keys: List of keys for properties that will be used as the polynomial arguments.
[default: ('u','v')]
:param max_order: The maximum total order to use for cross terms between keys.
[default: None, which uses the maximum value of any individual key's order]
:param logger: A logger object for logging debug info. [default: None]
"""
def __init__(self, order, keys=('u','v'), max_order=None, logger=None):
super(BasisPolynomial, self).__init__()
self._keys = keys
        if hasattr(order, '__len__'):
if not len(order)==len(keys):
raise ValueError('Number of provided orders does not match number of keys')
self._orders = order
else:
self._orders = (order,) * len(keys)
if max_order is None:
self._max_order = np.max(self._orders)
else:
self._max_order = max_order
if self._max_order<0 or np.any(np.array(self._orders) < 0):
# Exception if we have any requests for negative orders
raise ValueError('Negative polynomial order specified')
# TODO: Need to update the Interp write command to handle lists.
# Or write a custom BasisPolynomial.write function.
self.kwargs = {
'order' : order,
}
# Now build a mask that picks the desired polynomial products
# Start with 1d arrays giving orders in all dimensions
ord_ranges = [np.arange(order+1,dtype=int) for order in self._orders]
# Nifty trick to produce n-dim array holding total order
        sumorder = np.sum(np.ix_(*ord_ranges))
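# Hedged illustration (not part of Piff): the np.ix_ trick above broadcasts the per-key order
# ranges against each other so that element (i, j) of the result holds the total order i + j.
# For two keys with order 2 each:
#
#   >>> rx, ry = np.ix_(np.arange(3), np.arange(3))   # shapes (3, 1) and (1, 3)
#   >>> total = rx + ry                               # [[0 1 2], [1 2 3], [2 3 4]]
#   >>> mask = total <= 2                             # True for the 6 monomials u**i v**j with i + j <= 2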
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 23 23:53:09 2016
@author: ORCHISAMA
"""
#calculate short time fourier transform and plot spectrogram
from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
from scipy.fftpack import fft, fftshift
from scipy.signal import hann
def nearestPow2(inp):
power = np.ceil(np.log2(inp))
return 2**power
def stft(signal, fs, nfft, overlap):
#plotting time domain signal
plt.figure(1)
t = np.arange(0,len(signal)/fs, 1/fs)
plt.plot(t,signal)
plt.axis(xmax = 1)
plt.xlabel('Time in seconds')
plt.ylabel('Amplitude')
plt.title('Speech signal')
if not np.log2(nfft).is_integer():
nfft = nearestPow2(nfft)
slength = len(signal)
hop_size = np.int32(overlap * nfft)
nFrames = int(np.round(len(signal)/(nfft-hop_size)))
#zero padding to make signal length long enough to have nFrames
signal = np.append(signal, np.zeros(nfft))
STFT = np.empty((nfft, nFrames))
segment = np.zeros(nfft)
start = 0
for n in range(nFrames):
segment = signal[start:start+nfft] * hann(nfft)
        padded_seg = np.append(segment, np.zeros(nfft))
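# Hedged illustration (not from the original file): how the framing arithmetic above works out
# for an assumed 1-second signal at 16 kHz with nfft = 1024 and 50% overlap.
#
#   >>> fs, nfft, overlap = 16000, 1024, 0.5
#   >>> hop_size = np.int32(overlap * nfft)                 # 512 samples
#   >>> n_frames = int(np.round(fs / (nfft - hop_size)))    # ~31 frames for 1 s of audio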
"""
data_utils.py
Collection of functions for dealing with data for plotting.
Author: <NAME>
Date: 01/27/2020
"""
from __future__ import absolute_import, division, print_function
import os
import arviz as az
import numpy as np
import tensorflow as tf
import pandas as pd
import xarray as xr
from dataclasses import dataclass
import seaborn as sns
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from typing import Dict
from pathlib import Path
import utils.file_io as io
from utils.file_io import timeit
from lattice.utils import u1_plaq_exact
from utils.attr_dict import AttrDict
# from plotters.plot_utils import get_matching_log_dirs
# from plotters.plot_observables import get_obs_dict, grid_plot
mpl.style.use('fast')
sns.set_palette('bright')
TLS_DEFAULT = mpl.rcParams['xtick.labelsize']
@dataclass
class RunParams:
hmc: bool
run_dir: str
eps: float
beta: float
run_steps: int
plaq_weight: float
charge_weight: float
num_steps: int
x_shape: tuple
input_shape: tuple
def __post__init__(self):
self.traj_len = self.num_steps * self.eps
@dataclass
class ChargeData:
q: tf.Tensor
dq: tf.Tensor
params: RunParams
# pylint:disable=invalid-name
def filter_dict(d, cond, key=None):
if key is not None:
val = d[key]
if isinstance(val, dict):
return {
k: v for k, v in val.items() if cond
}
raise ValueError('If passing a key, d[key] must be a dict.')
return {
k: v for k, v in d.items() if cond
}
def _look(p, s, conds=None):
print(f'Looking in {p}...')
matches = [x for x in Path(p).rglob(f'*{s}*')]
if conds is not None:
if isinstance(conds, (list, tuple)):
for cond in conds:
matches = [x for x in matches if cond(x)]
else:
matches = [x for x in matches if cond(x)]
return matches
def _get_dirs(paths, hmc=False):
def _look(p, s, conds=None):
print(f'Looking in {p}...')
matches = [x for x in Path(p).rglob(f'*{s}*')]
if conds is not None:
if isinstance(conds, (list, tuple)):
for cond in conds:
matches = [x for x in matches if cond(x)]
else:
matches = [x for x in matches if cond(x)]
return matches
dirs = []
if hmc:
search_str = 'HMC_L16_b'
conds = (
lambda x: 'hmc_logs' in str(x),
lambda x: 'hmc' in str(x).lower()
)
else:
search_str = 'L16_b2048'
conds = (
lambda x: 'GaugeModel_logs' in (str(x)),
lambda x: 'HMC_' not in str(x),
lambda x: Path(x).is_dir(),
)
if isinstance(paths, (list, tuple)):
for path in paths:
dirs += _look(path, search_str, conds)
else:
dirs = _look(paths, search_str, conds)
return dirs
def load_from_dir(d, fnames=None):
if fnames is None:
fnames = {
'dq': 'dq.z',
'charges': 'charges.z',
'run_params': 'run_params.z'
}
darr = [x for x in Path(d).iterdir() if x.is_dir()]
for rd in darr:
files = {k: sorted(rd.glob(f'*{v}*')) for k, v in fnames.items()}
data = {k: io.loadz(v) for k, v in files.items()}
return data
def load_charge_data(dirs, hmc=False):
data = {}
for d in dirs:
print(f'Looking in dir: {d}...')
if 'inference_hmc' in str(d):
print(f'Skipping {str(d)}...')
continue
dqfile = sorted(d.rglob('dq.z'))
qfile = sorted(d.rglob('charges.z'))
rpfile = sorted(d.rglob('run_params.z'))
num_runs = len(dqfile)
if num_runs > 0:
for dqf, qf, rpf in zip(dqfile, qfile, rpfile):
params = io.loadz(rpf)
                if 'xeps' in params.keys() and 'veps' in params.keys():
xeps = np.array([i.numpy() for i in params['xeps']])
veps = np.array([i.numpy() for i in params['veps']])
eps = (np.mean(xeps) + np.mean(veps)) / 2.
elif 'eps' in params.keys():
eps = params['eps']
params['eps'] = eps
params = RunParams(**params)
qarr = io.loadz(qf)
dqarr = io.loadz(dqf)
print(
'...loading data for (beta, num_steps, eps): '
f'({params.beta}, {params.num_steps}, {params.eps:.3g})'
)
charge_data = ChargeData(q=qarr, dq=dqarr, params=params)
try:
data[params.beta].update({params.traj_len: charge_data})
except KeyError:
data[params.beta] = {params.traj_len: charge_data}
# def _update_dict(beta, z, qdata):
# try:
# z[beta].update({params.traj_len: qdata})
# except KeyError:
# z[beta] = {params.traj_len: qdata}
#
# return z
#
# data = _update_dict(params.beta, data, charge_data)
return data
def calc_tau_int(data, therm_frac=0.2):
"""Calculate the integrated autocorrelation time."""
tau_int = {}
for key, val in data.items():
tau_int[key] = {}
for k, v in val.items():
arr, _ = therm_arr(v, therm_frac=therm_frac)
arr = arr.T
pass
# reference:
# https://github.com/rougier/numpy-100/blob/master/100_Numpy_exercises_with_solutions.md
# ----------
# Problem:
# 100. Compute bootstrapped 95% confidence intervals for the mean of a 1D
# array X (i.e., resample the elements of an array with replacement N times,
# compute the mean of each sample, and then compute percentiles over the
# means).
def bootstrapped_confidence_interval(x: np.ndarray, N: int = 1000):
idx = np.random.randint(0, x.size, (N, x.size))
means = x[idx].mean(axis=1)
confint = np.percentile(means, [2.5, 97.5])
return confint
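# Hedged usage sketch: a bootstrapped 95% confidence interval for the mean of a synthetic
# sample; the exact numbers depend on the random draws.
#
#   >>> rng = np.random.default_rng(0)
#   >>> x = rng.normal(loc=1.0, scale=2.0, size=500)
#   >>> lo, hi = bootstrapped_confidence_interval(x, N=2000)
#   >>> lo < x.mean() < hi    # the interval should bracket the sample mean
#   True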
# Reference: https://dfm.io/posts/autocorr/
def next_pow_two(n):
i = 1
while i < n:
i = i << 1
return i
def autocorr_func_1d(x, norm=True):
"""Compute the autocorrelation function of a 1D chain."""
x = np.atleast_1d(x)
if len(x.shape) != 1:
raise ValueError('Invalid dimensions for 1D autocorrelation function.')
n = next_pow_two(len(x))
# Compute the FFT and then (from that) the auto-correlation function
f = np.fft.fft(x - np.mean(x), n=2*n)
acf = np.fft.ifft(f * np.conjugate(f))[:len(x)].real
acf /= 4 * n
# Optionally normalize
if norm:
acf /= acf[0]
return acf
def auto_window(taus, c):
"""Automated windowing procedure following Sokal (1989)."""
m = np.arange(len(taus)) < c * taus
if np.any(m):
return np.argmin(m)
return len(taus) - 1
def autocorr_gw2010(y, c=5.0):
"""Following the suggestion from Goodman & Weare (2010)."""
f = autocorr_func_1d(np.mean(y, axis=0))
taus = 2.0 * np.cumsum(f) - 1.0
window = auto_window(taus, c)
return taus[window]
def autocorr_new(y, c=5.0):
"""New implementation of autocorrelation function."""
f = np.zeros(y.shape[1])
for yy in y:
f += autocorr_func_1d(yy)
f /= len(y)
taus = 2.0 * np.cumsum(f) - 1.0
window = auto_window(taus, c)
return taus[window]
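# Hedged usage sketch (following the dfm.io reference above): estimating the integrated
# autocorrelation time from a batch of chains stored as a (n_chains, n_steps) array.
#
#   >>> chains = np.random.randn(10, 5000)     # white-noise chains, so tau should be close to 1
#   >>> tau_gw = autocorr_gw2010(chains)       # ACF of the chain-averaged series
#   >>> tau_new = autocorr_new(chains)         # average the per-chain ACFs first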
def calc_autocorr(y):
N = np.exp(np.linspace(np.log(100), np.log(y.shape[1]), 20)).astype(int)
new = np.empty(len(N))
for i, n in enumerate(N):
new[i] = autocorr_new(y[:, :n])
return N, new
def flatten_dict(d):
"""Recursively convert all entries of `d` to be `AttrDict`."""
if not isinstance(d, AttrDict):
d = AttrDict(**d)
for key, val in d.items():
if isinstance(val, dict):
if not isinstance(val, AttrDict):
d[key] = flatten_dict(val)
else:
d[key] = AttrDict(**val)
return d
def _load_inference_data(log_dir, fnames, inference_str='inference'):
"""Helper function for loading inference data from `log_dir`."""
run_dir = os.path.join(log_dir, inference_str)
if os.path.isdir(run_dir):
data_dir = os.path.join(run_dir, 'run_data')
rp_file = os.path.join(run_dir, 'run_params.z')
if os.path.isfile(rp_file) and os.path.isdir(data_dir):
run_params = io.loadz(rp_file)
key = (run_params['beta'],
run_params['eps'],
run_params['num_steps'])
data = [
io.loadz(os.path.join(data_dir, f'{fname}.z'))
for fname in fnames
]
return key, data
def load_inference_data(dirs, search_strs, inference_str='inference'):
data = {
s: {} for s in search_strs
}
for d in dirs:
print(f'Looking in dir: {d}...')
run_dir = Path(os.path.join(d, inference_str))
if run_dir.is_dir():
run_dirs = [x for x in run_dir.iterdir() if x.is_dir()]
for rd in run_dirs:
print(f'...looking in run_dir: {rd}...')
rp_file = os.path.join(str(rd), 'run_params.z')
if os.path.isfile(rp_file):
params = io.loadz(rp_file)
beta = params['beta']
eps = params['eps']
num_steps = params['num_steps']
data_dir = os.path.join(str(rd), 'run_data')
if os.path.isdir(data_dir):
for search_str in search_strs:
dfile = os.path.join(data_dir, f'{search_str}.z')
if os.path.isfile(dfile):
_data = io.loadz(dfile)
try:
data[search_str].update({
(beta, num_steps, eps): _data
})
except KeyError:
data[search_str] = {
(beta, num_steps, eps): _data
}
return data
def _get_l2hmc_dirs(paths, search_str=None):
"""Look for `log_dirs` containing a training/inference run for L2HMC."""
if search_str is None:
search_str = '*L16_b*'
dirs = []
for path in paths:
if not isinstance(path, Path):
path = Path(os.path.abspath(path))
print(f'Looking in {path}...')
dirs += [
x for x in path.rglob(search_str)
if 'GaugeModel_logs' in str(x)
and 'HMC_' not in str(x)
and x.is_dir()
]
return dirs
def get_l2hmc_dirs():
bd_local = os.path.abspath(
'/Users/saforem2/thetaGPU/training'
)
bd_theta = os.path.abspath(
'/lus/theta-fs0/projects/DLHMC/thetaGPU/training'
)
l2hmc_dirs = []
if os.path.isdir(bd_local):
l2hmc_dirs += _get_l2hmc_dirs(bd_local)
if os.path.isdir(bd_theta):
l2hmc_dirs += _get_l2hmc_dirs(bd_theta)
return l2hmc_dirs
def get_hmc_dirs(base_dir=None):
if base_dir is None:
base_dir = os.path.abspath(
'/lus/theta-fs0/projects/DLHMC/thetaGPU/inference/'
)
if not os.path.isdir(base_dir):
base_dir = os.path.abspath(
'/Users/saforem2/thetaGPU/inference'
)
if not os.path.isdir(base_dir):
raise FileNotFoundError(f'Unable to locate {base_dir}')
base_dir = Path(base_dir)
    hmc_dirs = [x for x in base_dir.rglob('*HMC_L16*') if x.is_dir()]
return hmc_dirs
def bootstrap(x, reps=10000):
n = len(x)
xb = np.random.choice(x, (n, reps), replace=True)
yb = xb.mean(axis=0)
upper, lower = np.percentile(yb, [2.5, 97.5])
return yb, (lower, upper)
def dq_stats(dq, reps=10000, therm_frac=0.2):
stats = {}
for key, val in dq.items():
for k, v in val.items():
data = therm_arr(v, therm_frac=therm_frac, ret_steps=False)
avgs = []
errs = []
for chain in data.T:
avg, (lower, upper) = bootstrap(chain, reps)
err = np.max([np.abs(avg - lower), np.abs(upper - avg)])
avgs.append(avg)
errs.append(err)
try:
stats[key].update({
k: {
'avg': np.mean(avgs),
'avg_std': np.std(avgs),
'err': np.mean(errs),
'err_std': np.std(errs),
'min': np.min(data),
'max': np.max(data),
}
})
except KeyError:
stats[key] = {
k: {
                        'avg': np.mean(avgs),
ENABLE_MULTIPROCESSING = True
from dsl import cpp_trace_param_automata
def generate_public_submission():
import numpy as np
import pandas as pd
import os
import json
from pathlib import Path
import matplotlib.pyplot as plt
from matplotlib import colors
import numpy as np
from xgboost import XGBClassifier
import pdb
# data_path = Path('.')
data_path = Path('.')
if not (data_path / 'test').exists():
data_path = Path('../input/abstraction-and-reasoning-challenge')
training_path = data_path / 'training'
evaluation_path = data_path / 'evaluation'
test_path = data_path / 'test'
def plot_result(test_input, test_prediction,
input_shape):
"""
Plots the first train and test pairs of a specified task,
using same color scheme as the ARC app
"""
cmap = colors.ListedColormap(
['#000000', '#0074D9', '#FF4136', '#2ECC40', '#FFDC00',
'#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25'])
norm = colors.Normalize(vmin=0, vmax=9)
fig, axs = plt.subplots(1, 2, figsize=(15, 15))
test_input = test_input.reshape(input_shape[0], input_shape[1])
axs[0].imshow(test_input, cmap=cmap, norm=norm)
axs[0].axis('off')
axs[0].set_title('Actual Target')
test_prediction = test_prediction.reshape(input_shape[0], input_shape[1])
axs[1].imshow(test_prediction, cmap=cmap, norm=norm)
axs[1].axis('off')
axs[1].set_title('Model Prediction')
plt.tight_layout()
plt.show()
def plot_test(test_prediction, task_name):
"""
Plots the first train and test pairs of a specified task,
using same color scheme as the ARC app
"""
cmap = colors.ListedColormap(
['#000000', '#0074D9', '#FF4136', '#2ECC40', '#FFDC00',
'#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25'])
norm = colors.Normalize(vmin=0, vmax=9)
fig, axs = plt.subplots(1, 1, figsize=(15, 15))
axs.imshow(test_prediction, cmap=cmap, norm=norm)
axs.axis('off')
axs.set_title(f'Test Prediction {task_name}')
plt.tight_layout()
plt.show()
# https://www.kaggle.com/inversion/abstraction-and-reasoning-starter-notebook
def flattener(pred):
str_pred = str([row for row in pred])
str_pred = str_pred.replace(', ', '')
str_pred = str_pred.replace('[[', '|')
str_pred = str_pred.replace('][', '|')
str_pred = str_pred.replace(']]', '|')
return str_pred
sample_sub1 = pd.read_csv(data_path / 'sample_submission.csv')
sample_sub1 = sample_sub1.set_index('output_id')
sample_sub1.head()
def get_moore_neighbours(color, cur_row, cur_col, nrows, ncols):
if cur_row <= 0:
top = -1
else:
top = color[cur_row - 1][cur_col]
if cur_row >= nrows - 1:
bottom = -1
else:
bottom = color[cur_row + 1][cur_col]
if cur_col <= 0:
left = -1
else:
left = color[cur_row][cur_col - 1]
if cur_col >= ncols - 1:
right = -1
else:
right = color[cur_row][cur_col + 1]
return top, bottom, left, right
def get_tl_tr(color, cur_row, cur_col, nrows, ncols):
if cur_row == 0:
top_left = -1
top_right = -1
else:
if cur_col == 0:
top_left = -1
else:
top_left = color[cur_row - 1][cur_col - 1]
if cur_col == ncols - 1:
top_right = -1
else:
top_right = color[cur_row - 1][cur_col + 1]
return top_left, top_right
def make_features(input_color, nfeat):
nrows, ncols = input_color.shape
feat = np.zeros((nrows * ncols, nfeat))
cur_idx = 0
for i in range(nrows):
for j in range(ncols):
feat[cur_idx, 0] = i
feat[cur_idx, 1] = j
feat[cur_idx, 2] = input_color[i][j]
feat[cur_idx, 3:7] = get_moore_neighbours(input_color, i, j, nrows, ncols)
feat[cur_idx, 7:9] = get_tl_tr(input_color, i, j, nrows, ncols)
feat[cur_idx, 9] = len(np.unique(input_color[i, :]))
feat[cur_idx, 10] = len(np.unique(input_color[:, j]))
feat[cur_idx, 11] = (i + j)
feat[cur_idx, 12] = len(np.unique(input_color[i - local_neighb:i + local_neighb,
j - local_neighb:j + local_neighb]))
cur_idx += 1
return feat
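    # Hedged illustration (not in the original kernel): for a single 3 x 4 grid the feature
    # matrix has one row per cell and nfeat (= 13) hand-crafted columns. Requires the global
    # local_neighb, which is set further down in this function.
    #
    #   >>> grid = np.random.randint(0, 10, size=(3, 4))
    #   >>> feat = make_features(grid, 13)
    #   >>> feat.shape
    #   (12, 13)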
def features(task, mode='train'):
num_train_pairs = len(task[mode])
feat, target = [], []
global local_neighb
for task_num in range(num_train_pairs):
input_color = np.array(task[mode][task_num]['input'])
target_color = task[mode][task_num]['output']
nrows, ncols = len(task[mode][task_num]['input']), len(task[mode][task_num]['input'][0])
target_rows, target_cols = len(task[mode][task_num]['output']), len(task[mode][task_num]['output'][0])
if (target_rows != nrows) or (target_cols != ncols):
print('Number of input rows:', nrows, 'cols:', ncols)
print('Number of target rows:', target_rows, 'cols:', target_cols)
not_valid = 1
return None, None, 1
imsize = nrows * ncols
# offset = imsize*task_num*3 #since we are using three types of aug
feat.extend(make_features(input_color, nfeat))
target.extend(np.array(target_color).reshape(-1, ))
return np.array(feat), np.array(target), 0
# mode = 'eval'
mode = 'test'
if mode == 'eval':
task_path = evaluation_path
elif mode == 'train':
task_path = training_path
elif mode == 'test':
task_path = test_path
all_task_ids = sorted(os.listdir(task_path))
nfeat = 13
local_neighb = 5
valid_scores = {}
model_accuracies = {'ens': []}
pred_taskids = []
for task_id in all_task_ids:
task_file = str(task_path / task_id)
with open(task_file, 'r') as f:
task = json.load(f)
feat, target, not_valid = features(task)
if not_valid:
print('ignoring task', task_file)
print()
not_valid = 0
continue
xgb = XGBClassifier(n_estimators=10, n_jobs=-1)
xgb.fit(feat, target, verbose=-1)
# training on input pairs is done.
# test predictions begins here
num_test_pairs = len(task['test'])
for task_num in range(num_test_pairs):
cur_idx = 0
input_color = np.array(task['test'][task_num]['input'])
nrows, ncols = len(task['test'][task_num]['input']), len(
task['test'][task_num]['input'][0])
feat = make_features(input_color, nfeat)
print('Made predictions for ', task_id[:-5])
preds = xgb.predict(feat).reshape(nrows, ncols)
if (mode == 'train') or (mode == 'eval'):
ens_acc = (np.array(task['test'][task_num]['output']) == preds).sum() / (nrows * ncols)
model_accuracies['ens'].append(ens_acc)
pred_taskids.append(f'{task_id[:-5]}_{task_num}')
# print('ensemble accuracy',(np.array(task['test'][task_num]['output'])==preds).sum()/(nrows*ncols))
# print()
preds = preds.astype(int).tolist()
# plot_test(preds, task_id)
sample_sub1.loc[f'{task_id[:-5]}_{task_num}',
'output'] = flattener(preds)
if (mode == 'train') or (mode == 'eval'):
df = pd.DataFrame(model_accuracies, index=pred_taskids)
print(df.head(10))
print(df.describe())
for c in df.columns:
print(f'for {c} no. of complete tasks is', (df.loc[:, c] == 1).sum())
df.to_csv('ens_acc.csv')
sample_sub1.head()
training_path = data_path / 'training'
evaluation_path = data_path / 'evaluation'
test_path = data_path / 'test'
training_tasks = sorted(os.listdir(training_path))
eval_tasks = sorted(os.listdir(evaluation_path))
T = training_tasks
Trains = []
for i in range(400):
task_file = str(training_path / T[i])
task = json.load(open(task_file, 'r'))
Trains.append(task)
E = eval_tasks
Evals = []
for i in range(400):
task_file = str(evaluation_path / E[i])
task = json.load(open(task_file, 'r'))
Evals.append(task)
cmap = colors.ListedColormap(
['#000000', '#0074D9', '#FF4136', '#2ECC40', '#FFDC00',
'#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25'])
norm = colors.Normalize(vmin=0, vmax=9)
# 0:black, 1:blue, 2:red, 3:greed, 4:yellow,
# 5:gray, 6:magenta, 7:orange, 8:sky, 9:brown
plt.figure(figsize=(5, 2), dpi=200)
plt.imshow([list(range(10))], cmap=cmap, norm=norm)
plt.xticks(list(range(10)))
plt.yticks([])
# plt.show()
def plot_task(task):
n = len(task["train"]) + len(task["test"])
fig, axs = plt.subplots(2, n, figsize=(4 * n, 8), dpi=50)
plt.subplots_adjust(wspace=0, hspace=0)
fig_num = 0
for i, t in enumerate(task["train"]):
t_in, t_out = np.array(t["input"]), np.array(t["output"])
axs[0][fig_num].imshow(t_in, cmap=cmap, norm=norm)
axs[0][fig_num].set_title(f'Train-{i} in')
axs[0][fig_num].set_yticks(list(range(t_in.shape[0])))
axs[0][fig_num].set_xticks(list(range(t_in.shape[1])))
axs[1][fig_num].imshow(t_out, cmap=cmap, norm=norm)
axs[1][fig_num].set_title(f'Train-{i} out')
axs[1][fig_num].set_yticks(list(range(t_out.shape[0])))
axs[1][fig_num].set_xticks(list(range(t_out.shape[1])))
fig_num += 1
for i, t in enumerate(task["test"]):
t_in, t_out = np.array(t["input"]), np.array(t["output"])
axs[0][fig_num].imshow(t_in, cmap=cmap, norm=norm)
axs[0][fig_num].set_title(f'Test-{i} in')
axs[0][fig_num].set_yticks(list(range(t_in.shape[0])))
axs[0][fig_num].set_xticks(list(range(t_in.shape[1])))
axs[1][fig_num].imshow(t_out, cmap=cmap, norm=norm)
axs[1][fig_num].set_title(f'Test-{i} out')
axs[1][fig_num].set_yticks(list(range(t_out.shape[0])))
axs[1][fig_num].set_xticks(list(range(t_out.shape[1])))
fig_num += 1
plt.tight_layout()
plt.show()
def plot_picture(x):
plt.imshow(np.array(x), cmap=cmap, norm=norm)
plt.show()
def Defensive_Copy(A):
n = len(A)
k = len(A[0])
L = np.zeros((n, k), dtype=int)
for i in range(n):
for j in range(k):
L[i, j] = 0 + A[i][j]
return L.tolist()
def Create(task, task_id=0):
n = len(task['train'])
Input = [Defensive_Copy(task['train'][i]['input']) for i in range(n)]
Output = [Defensive_Copy(task['train'][i]['output']) for i in range(n)]
Input.append(Defensive_Copy(task['test'][task_id]['input']))
return Input, Output
def Recolor(task):
Input = task[0]
Output = task[1]
Test_Picture = Input[-1]
Input = Input[:-1]
N = len(Input)
for x, y in zip(Input, Output):
if len(x) != len(y) or len(x[0]) != len(y[0]):
return -1
Best_Dict = -1
Best_Q1 = -1
Best_Q2 = -1
Best_v = -1
# v ranges from 0 to 3. This gives an extra flexibility of measuring distance from any of the 4 corners
Pairs = []
for t in range(15):
for Q1 in range(1, 8):
for Q2 in range(1, 8):
if Q1 + Q2 == t:
Pairs.append((Q1, Q2))
for Q1, Q2 in Pairs:
for v in range(4):
if Best_Dict != -1:
continue
possible = True
Dict = {}
for x, y in zip(Input, Output):
n = len(x)
k = len(x[0])
for i in range(n):
for j in range(k):
if v == 0 or v == 2:
p1 = i % Q1
else:
p1 = (n - 1 - i) % Q1
if v == 0 or v == 3:
p2 = j % Q2
else:
p2 = (k - 1 - j) % Q2
color1 = x[i][j]
color2 = y[i][j]
if color1 != color2:
rule = (p1, p2, color1)
if rule not in Dict:
Dict[rule] = color2
elif Dict[rule] != color2:
possible = False
if possible:
# Let's see if we actually solve the problem
for x, y in zip(Input, Output):
n = len(x)
k = len(x[0])
for i in range(n):
for j in range(k):
if v == 0 or v == 2:
p1 = i % Q1
else:
p1 = (n - 1 - i) % Q1
if v == 0 or v == 3:
p2 = j % Q2
else:
p2 = (k - 1 - j) % Q2
color1 = x[i][j]
rule = (p1, p2, color1)
if rule in Dict:
color2 = 0 + Dict[rule]
else:
color2 = 0 + y[i][j]
if color2 != y[i][j]:
possible = False
if possible:
Best_Dict = Dict
Best_Q1 = Q1
Best_Q2 = Q2
Best_v = v
if Best_Dict == -1:
return -1 # meaning that we didn't find a rule that works for the traning cases
# Otherwise there is a rule: so let's use it:
n = len(Test_Picture)
k = len(Test_Picture[0])
answer = np.zeros((n, k), dtype=int)
for i in range(n):
for j in range(k):
if Best_v == 0 or Best_v == 2:
p1 = i % Best_Q1
else:
p1 = (n - 1 - i) % Best_Q1
if Best_v == 0 or Best_v == 3:
p2 = j % Best_Q2
else:
p2 = (k - 1 - j) % Best_Q2
color1 = Test_Picture[i][j]
rule = (p1, p2, color1)
if (p1, p2, color1) in Best_Dict:
answer[i][j] = 0 + Best_Dict[rule]
else:
answer[i][j] = 0 + color1
return answer.tolist()
sample_sub2 = pd.read_csv(data_path / 'sample_submission.csv')
sample_sub2.head()
def flattener(pred):
str_pred = str([row for row in pred])
str_pred = str_pred.replace(', ', '')
str_pred = str_pred.replace('[[', '|')
str_pred = str_pred.replace('][', '|')
str_pred = str_pred.replace(']]', '|')
return str_pred
example_grid = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
# display(example_grid)
print(flattener(example_grid))
Solved = []
Problems = sample_sub2['output_id'].values
Proposed_Answers = []
test_paths_my = {task.stem: json.load(task.open()) for task in test_path.iterdir()}
test_task_ids = np.sort(list(test_paths_my.keys()))
print(Problems, len(Problems))
task_number_my = dict(zip(test_task_ids, np.arange(100)))
for i in range(len(Problems)):
output_id = Problems[i]
task_id = output_id.split('_')[0]
pair_id = int(output_id.split('_')[1])
f = str(test_path / str(task_id + '.json'))
with open(f, 'r') as read_file:
task = json.load(read_file)
n = len(task['train'])
Input = [Defensive_Copy(task['train'][j]['input']) for j in range(n)]
Output = [Defensive_Copy(task['train'][j]['output']) for j in range(n)]
Input.append(Defensive_Copy(task['test'][pair_id]['input']))
solution = Recolor([Input, Output])
pred = ''
if solution != -1:
Solved.append(i)
pred1 = flattener(solution)
pred = pred + pred1 + ' '
if pred == '':
pred = flattener(example_grid)
Proposed_Answers.append(pred)
sample_sub2['output'] = Proposed_Answers
sample_sub1 = sample_sub1.reset_index()
sample_sub1 = sample_sub1.sort_values(by="output_id")
sample_sub2 = sample_sub2.sort_values(by="output_id")
out1 = sample_sub1["output"].astype(str).values
out2 = sample_sub2["output"].astype(str).values
merge_output = []
for o1, o2 in zip(out1, out2):
o = o1.strip().split(" ")[:1] + o2.strip().split(" ")[:2]
o = " ".join(o[:3])
merge_output.append(o)
sample_sub1["output"] = merge_output
sample_sub1["output"] = sample_sub1["output"].astype(str)
# test_paths_my = { task.stem: json.load(task.open()) for task in test_path.iterdir() }
# test_task_ids = np.sort(list(test_paths_my.keys()))
# task_number_my = dict(zip(test_task_ids, np.arange(100)))
submission = sample_sub1.copy()
submission.to_csv("public_submission.csv", index=False)
#generate_public_submission()
import numpy as np
from tqdm.notebook import tqdm
from PIL import Image, ImageDraw
import time
from collections import defaultdict
import os
import json
import random
import copy
import networkx as nx
from pathlib import Path
import matplotlib.colors as colors
import matplotlib.pyplot as plt
from itertools import product
import pandas as pd
import multiprocessing
import subprocess
# from moviepy.editor import ImageSequenceClip
# from moviepy.editor import clips_array, CompositeVideoClip
# from moviepy.video.io.html_tools import html_embed, HTML2
# def display_vid(vid, verbose=False, **html_kw):
# """
# Display a moviepy video clip, useful for removing loadbars
# """
# rd_kwargs = {
# 'fps': 10, 'verbose': verbose
# }
# if not verbose:
# rd_kwargs['logger'] = None
# return HTML2(html_embed(vid, filetype=None, maxduration=60,
# center=True, rd_kwargs=rd_kwargs, **html_kw))
data_path = Path('../input/abstraction-and-reasoning-challenge/')
# data_path = Path('.') # Artyom: it's better use symlinks locally
cmap_lookup = [
'#000000', '#0074D9', '#FF4136', '#2ECC40', '#FFDC00',
'#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25'
]
cmap_lookup = [np.array([int(x[1:3], 16), int(x[3:5], 16), int(x[5:], 16)]) for x in cmap_lookup]
def cmap(x):
"""
Translate a task matrix to a color coded version
arguments
x : a h x w task matrix
returns
a h x w x 3 matrix with colors instead of numbers
"""
y = np.zeros((*x.shape, 3))
y[x < 0, :] = np.array([112, 128, 144])
y[x > 9, :] = np.array([255, 248, 220])
for i, c in enumerate(cmap_lookup):
y[x == i, :] = c
return y
def draw_one(x, k=20):
"""
Create a PIL image from a task matrix, the task will be
drawn using the default color coding with grid lines
arguments
x : a task matrix
k = 20 : an up scaling factor
returns
a PIL image
"""
img = Image.fromarray(cmap(x).astype(np.uint8)).resize((x.shape[1] * k, x.shape[0] * k), Image.NEAREST)
draw = ImageDraw.Draw(img)
for i in range(x.shape[0]):
draw.line((0, i * k, img.width, i * k), fill=(80, 80, 80), width=1)
for j in range(x.shape[1]):
draw.line((j * k, 0, j * k, img.height), fill=(80, 80, 80), width=1)
return img
def vcat_imgs(imgs, border=10):
"""
Concatenate images vertically
arguments:
imgs : an array of PIL images
border = 10 : the size of space between images
returns:
a PIL image
"""
h = max(img.height for img in imgs)
w = sum(img.width for img in imgs)
res_img = Image.new('RGB', (w + border * (len(imgs) - 1), h), color=(255, 255, 255))
offset = 0
for img in imgs:
res_img.paste(img, (offset, 0))
offset += img.width + border
return res_img
def plot_task(task):
n = len(task["train"]) + len(task["test"])
fig, axs = plt.subplots(2, n, figsize=(n * 4, 8))
plt.subplots_adjust(wspace=0, hspace=0)
fig_num = 0
def go(ax, title, x):
ax.imshow(draw_one(x), interpolation='nearest')
ax.set_title(title)
ax.set_yticks([])
ax.set_xticks([])
for i, t in enumerate(task["train"]):
go(axs[0][fig_num], f'Train-{i} in', t["input"])
go(axs[1][fig_num], f'Train-{i} out', t["output"])
fig_num += 1
for i, t in enumerate(task["test"]):
go(axs[0][fig_num], f'Test-{i} in', t["input"])
try:
go(axs[1][fig_num], f'Test-{i} out', t["output"])
except:
go(axs[1][fig_num], f'Test-{i} out', np.zeros_like(t["input"]))
fig_num += 1
plt.tight_layout()
plt.show()
def real_trace_param_automata(input, params, n_iter, n_hidden):
"""
Execute an automata and return all the intermediate states
arguments:
step_fn : transition rule function, should take two arguments `input` and `hidden_i`,
should return an output grid an a new hidden hidden grid
n_iter : num of iteration to perform
n_hidden: number of hidden grids, if set to 0 `hidden_i` will be set to None
        loadbar = True: whether to display load bars
returns:
an array of tuples if output and hidden grids
"""
# hidden = np.zeros((n_hidden, *input.shape)) if n_hidden > 0 else None
#
# global_rules, ca_rules = params
#
# trace = [(input, hidden)]
#
# for rule in global_rules:
#
# output, hidden = apply_rule(input, hidden, rule)
# trace.append((output, hidden))
# input = output
#
# its = range(n_iter)
#
# for i_it in its:
# output, hidden = compute_parametrized_automata(input, hidden, ca_rules)
# trace.append((output, hidden))
#
# if (input.shape == output.shape) and (output == input).all():
# break
# input = output
hidden = np.zeros((n_hidden, *input.shape)) if n_hidden > 0 else None
global_rules, ca_rules, split_rule, merge_rule = params
grids = apply_split_rule(input, hidden, split_rule)
#print(grids[0][0])
for rule in global_rules:
for i, (inp, hid) in enumerate(grids):
if rule['macro_type'] == 'global_rule':
if rule['apply_to'] == 'all' or \
(rule['apply_to'] == 'index' and i == rule['apply_to_index']%len(grids) or
(rule['apply_to'] == 'last' and i == len(grids) - 1)):
grids[i] = apply_rule(inp, hid, rule)
elif rule['macro_type'] == 'global_interaction_rule':
grids = apply_interaction_rule(grids, rule)
#print(grids[0][0])
#1/0
for i, (input, hidden) in enumerate(grids):
for _ in range(n_iter):
output, hidden = compute_parametrized_automata(input, hidden, ca_rules)
if np.array_equal(input, output):
break
input = output
grids[i] = (output, hidden)
output = apply_merge_rule(grids, merge_rule, split_rule)
return output
def apply_interaction_rule(grids, rule):
if rule['type'] == 'align_pattern':
# index_from = rule['index_from'] % len(grids)
# index_to = rule['index_to'] % len(grids)
# allow_rotation = rule['allow_rotation']
if len(grids) > 5:
return grids
for index_from in range(len(grids)):
for index_to in range(index_from+1, len(grids)):
input_i = grids[index_from][0]
input_j = grids[index_to][0]
# print(np.max(input_i>0, axis=1))
# print(np.max(input_i>0, axis=1).shape)
# print(np.arange(input_i.shape[0]).shape)
#1/0
i_nonzero_rows = np.arange(input_i.shape[0])[np.max(input_i>0, axis=1)]
i_nonzero_columns = np.arange(input_i.shape[1])[np.max(input_i>0, axis=0)]
j_nonzero_rows = np.arange(input_j.shape[0])[np.max(input_j>0, axis=1)]
j_nonzero_columns = np.arange(input_j.shape[1])[np.max(input_j>0, axis=0)]
if i_nonzero_rows.shape[0] == 0 or i_nonzero_columns.shape[0] == 0 or \
j_nonzero_rows.shape[0] == 0 or j_nonzero_columns.shape[0] == 0:
continue
i_minrow = np.min(i_nonzero_rows)
i_mincol = np.min(i_nonzero_columns)
i_maxrow = np.max(i_nonzero_rows) + 1
i_maxcol = np.max(i_nonzero_columns) + 1
j_minrow = np.min(j_nonzero_rows)
j_mincol = np.min(j_nonzero_columns)
j_maxrow = np.max(j_nonzero_rows) + 1
j_maxcol = np.max(j_nonzero_columns) + 1
figure_to_align = input_i[i_minrow:i_maxrow, i_mincol:i_maxcol]
figure_target = input_j[j_minrow:j_maxrow, j_mincol:j_maxcol]
best_fit = 0
best_i_fit, best_j_fit = -1, -1
#print(figure_to_align)
#print(figure_target)
if figure_to_align.shape[0] < figure_target.shape[0] or figure_to_align.shape[1] < figure_target.shape[1]:
continue
#1/0
else:
for i_start in range((figure_to_align.shape[0] - figure_target.shape[0])+1):
for j_start in range((figure_to_align.shape[1] - figure_target.shape[1])+1):
fig_1 = figure_to_align[i_start:(i_start + figure_target.shape[0]), j_start:(j_start + figure_target.shape[1])]
if np.logical_and(np.logical_and(figure_target > 0, figure_target!=rule['allow_color']), figure_target != fig_1).any():
continue
fit = np.sum(figure_target==fig_1)
if fit > best_fit:
best_i_fit, best_j_fit = i_start, j_start
best_fit = fit
if best_fit == 0:
continue
imin = j_minrow-best_i_fit
imax = j_minrow-best_i_fit + figure_to_align.shape[0]
jmin = j_mincol - best_j_fit
jmax = j_mincol - best_j_fit + figure_to_align.shape[1]
begin_i = max(imin, 0)
begin_j = max(jmin, 0)
end_i = min(imax, input_j.shape[0])
end_j = min(jmax, input_j.shape[1])
i_fig_begin = (begin_i-imin)
i_fig_end = figure_to_align.shape[0]-(imax-end_i)
j_fig_begin = (begin_j-jmin)
j_fig_end = figure_to_align.shape[1]-(jmax-end_j)
if rule['fill_with_color'] == 0:
input_j[begin_i:end_i, begin_j:end_j] = figure_to_align[i_fig_begin:i_fig_end, j_fig_begin:j_fig_end]
else:
for i, j in product(range(end_i-begin_i + 1), range(end_j-begin_j + 1)):
if input_j[begin_i + i, begin_j + j] == 0:
input_j[begin_i + i, begin_j + j] = rule['fill_with_color'] * (figure_to_align[i_fig_begin + i, j_fig_begin + j])
return grids
def trace_param_automata(input, params, n_iter, n_hidden):
# expected = real_trace_param_automata(input, params, n_iter, n_hidden)
#
# testcase = {'input': input, 'params': params}
# print(str(testcase).replace('\'', '"').replace('array(', '').replace(')', ''))
output = cpp_trace_param_automata(input, params, n_iter)
# if not np.array_equal(expected, output):
# print('cpp result is wrong')
# print('input:')
# print(input)
# print('expected:')
# print(expected)
# print('got:')
# print(output)
#
# diff = [[str(g) if e != g else '-' for e, g in zip(exp_row, got_row)]
# for exp_row, got_row in zip(expected, output)]
# diff_lines = [' '.join(line) for line in diff]
# diff_str = '[[' + ']\n ['.join(diff_lines)
#
# print('diff:')
# print(diff_str)
# print('rules')
# print(params)
#
# assert False
return [[output]]
# def vis_automata_trace(states, loadbar=False, prefix_image=None):
# """
# Create a video from an array of automata states
#
# arguments:
# states : array of automata steps, returned by `trace_automata()`
# loadbar = True: weather display loadbars
# prefix_image = None: image to add to the beginning of each frame
# returns
# a moviepy ImageSequenceClip
# """
# frames = []
# if loadbar:
# states = tqdm(states, desc='Frame')
# for i, (canvas, hidden) in enumerate(states):
#
# frame = []
# if prefix_image is not None:
# frame.append(prefix_image)
# frame.append(draw_one(canvas))
# frames.append(vcat_imgs(frame))
#
# return ImageSequenceClip(list(map(np.array, frames)), fps=10)
# def vis_automata_paramed_task(tasks, parameters, n_iter, n_hidden, vis_only_ix=None):
# """
# Visualize the automata steps during the task solution
# arguments:
# tasks : the task to be solved by the automata
# step_fn : automata transition function as passed to `trace_automata()`
# n_iter : number of iterations to perform
# n_hidden : number of hidden girds
# """
#
# n_vis = 0
#
# def go(task, n_vis, test=False):
#
# if vis_only_ix is not None and vis_only_ix != n_vis:
# return
# trace = trace_param_automata(task['input'], parameters, n_iter, n_hidden)
# if not test:
# vid = vis_automata_trace(trace, prefix_image=draw_one(task['output']))
# else:
# vid = vis_automata_trace(trace, prefix_image=draw_one(np.zeros_like(task['input'])))
#
# # display(display_vid(vid))
#
# for task in (tasks['train']):
# n_vis += 1
# go(task, n_vis)
#
# for task in (tasks['test']):
# n_vis += 1
# go(task, n_vis, True)
training_path = data_path / 'training'
evaluation_path = data_path / 'evaluation'
test_path = data_path / 'test'
training_tasks = sorted(os.listdir(training_path))
evaluation_tasks = sorted(os.listdir(evaluation_path))
test_tasks = sorted(os.listdir(test_path))
def load_data(p, phase=None):
"""
Load task data
"""
if phase in {'training', 'test', 'evaluation'}:
p = data_path / phase / p
task = json.loads(Path(p).read_text())
dict_vals_to_np = lambda x: {k: np.array(v) for k, v in x.items()}
assert set(task) == {'test', 'train'}
res = dict(test=[], train=[])
for t in task['train']:
assert set(t) == {'input', 'output'}
res['train'].append(dict_vals_to_np(t))
for t in task['test']:
if phase == 'test':
assert set(t) == {'input'}
else:
assert set(t) == {'input', 'output'}
res['test'].append(dict_vals_to_np(t))
return res
nbh = lambda x, i, j: {
(ip, jp) : x[i+ip, j+jp]
for ip, jp in product([1, -1, 0], repeat=2)
if 0 <= i+ip < x.shape[0] and 0 <= j+jp < x.shape[1] and (not (ip==0 and jp==0))
}
def get_random_split_rule(all_colors, best_candidates={}, temp=0, config={}, r_type=None):
rule = {}
rule['type'] = random.choice(['nothing', 'color_figures', 'figures', 'macro_multiply'])
if rule['type'] in ['color_figures', 'figures']:
rule['sort'] = random.choice(['biggest', 'smallest'])
if rule['type'] == 'macro_multiply':
rule['k1'] = np.random.randint(config['mink1'], config['maxk1']+1)
rule['k2'] = np.random.randint(config['mink2'], config['maxk2']+1)
return rule
def get_random_merge_rule(all_colors, best_candidates={}, temp=0, config={}, r_type=None):
rule = {}
rule['type'] = random.choice(['cellwise_or', 'output_first', 'output_last'])
return rule
def apply_split_rule(input, hidden, split_rule):
if split_rule['type'] == 'nothing':
return [(input, hidden)]
if split_rule['type'] == 'macro_multiply':
ks = split_rule['k1'] * split_rule['k2']
grids = [(np.copy(input), np.copy(hidden)) for _ in range(ks)]
return grids
#split_rule['type'] = 'figures'
dif_c_edge = split_rule['type'] == 'figures'
communities = get_connectivity_info(input, ignore_black=True, edge_for_difcolors=dif_c_edge)
if len(communities) > 0:
if split_rule['sort'] == 'biggest':
communities = communities[::-1]
grids = [(np.zeros_like(input), np.zeros_like(hidden)) for _ in range(len(communities))]
for i in range(len(communities)):
for point in communities[i]:
grids[i][0][point] = input[point]
else:
grids = [(input, hidden)]
return grids
def apply_merge_rule(grids, merge_rule, split_rule):
if split_rule['type'] == 'macro_multiply':
shape_base = grids[0][0].shape
shapes = [arr[0].shape for arr in grids]
if not np.array([shape_base == sh for sh in shapes]).all():
            return np.zeros((1, 1), dtype=int)
ks_1 = split_rule['k1']
ks_2 = split_rule['k2']
output = np.zeros((shape_base[0] * ks_1, shape_base[1] * ks_2), dtype=np.int8)
for k1 in range(ks_1):
for k2 in range(ks_2):
output[(k1*shape_base[0]):((k1+1) * shape_base[0]), (k2*shape_base[1]):((k2+1) * shape_base[1])] = grids[k1*ks_2 + k2][0]
return output
if merge_rule['type'] == 'cellwise_or':
output = np.zeros_like(grids[0][0])
for i in np.arange(len(grids))[::-1]:
if grids[i][0].shape == output.shape:
output[grids[i][0]>0] = grids[i][0][grids[i][0]>0]
return output
elif merge_rule['type'] == 'output_first':
output = grids[0][0]
elif merge_rule['type'] == 'output_last':
output = grids[-1][0]
return output
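# Hedged illustration (not in the original kernel): with a 'macro_multiply' split rule and
# k1 = k2 = 2, apply_merge_rule tiles the four per-copy grids into an output twice as tall and
# twice as wide as the input, copy (k1, k2) landing in block (k1, k2).
#
#   >>> grids = [(np.full((3, 3), c, dtype=np.int8), None) for c in range(4)]
#   >>> split = {'type': 'macro_multiply', 'k1': 2, 'k2': 2}
#   >>> out = apply_merge_rule(grids, {'type': 'cellwise_or'}, split)
#   >>> out.shape
#   (6, 6)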
def get_random_ca_rule(all_colors, best_candidates={}, temp=0, config={}, r_type=None):
types_possible = \
[
'copy_color_by_direction',
'direct_check',
'indirect_check',
'nbh_check',
'corner_check',
'color_distribution',
]
ca_rules = []
best_candidates_items = list(best_candidates.items())
if len(best_candidates_items) > 0:
for best_score, best_candidates_score in best_candidates_items:
for best_c in best_candidates_score:
gl, ca, _, _ = best_c
ca_rules += [c['type'] for c in ca]
type_counts = dict(zip(types_possible, np.zeros(len(types_possible))))
rules, counts = np.unique(ca_rules, return_counts=True)
for i in range(rules.shape[0]):
type_counts[rules[i]] += counts[i]
counts = np.array(list(type_counts.values()))
if np.sum(counts) > 0:
counts /= np.sum(counts)
else:
counts = np.ones(counts.shape[0]) / counts.shape[0]
uniform = np.ones(counts.shape[0]) / counts.shape[0]
probs = temp * counts + (1 - temp) * uniform
else:
probs = np.ones(len(types_possible)) / len(types_possible)
colors = all_colors[1:]
type_probs = np.ones(len(types_possible)) / len(types_possible)
if r_type is None:
random_type = types_possible[np.random.choice(len(types_possible), p=probs)]
else:
random_type = r_type
def get_random_out_color():
possible_colors = config['possible_colors_out']
return np.random.choice(possible_colors)
def get_random_ignore_colors():
if config['possible_ignore_colors'].shape[0] > 0:
possible_colors = config['possible_ignore_colors']
return possible_colors[np.random.randint(2, size=possible_colors.shape[0]) == 1]
else:
return []
def get_random_all_colors():
return all_colors[np.random.randint(2, size=all_colors.shape[0]) == 1]
def get_random_colors():
return get_random_all_colors()
def get_random_all_color():
return np.random.choice(all_colors)
def get_random_color():
return get_random_all_color()
rule = {}
rule['type'] = random_type
rule['macro_type'] = 'ca_rule'
rule['ignore_colors'] = list(config['ignore_colors'])
if np.random.rand() < 0.5 and config['possible_ignore_colors'].shape[0]:
rule['ignore_colors'] += [random.choice(config['possible_ignore_colors'])]
if random_type == 'copy_color_by_direction':
rule['direction'] = random.choice(['everywhere'])
rule['copy_color'] = [get_random_out_color()]
rule['look_back_color'] = rule['copy_color'][0]
elif random_type == 'corner_check':
if np.random.rand() < 0.5:
rule['nbh_check_colors'] = [get_random_all_color()]
else:
rule['nbh_check_colors'] = list(np.unique([get_random_all_color(), get_random_all_color()]))
rule['nbh_check_out'] = get_random_out_color()
rule['ignore_colors'] = list(np.unique(rule['ignore_colors'] + [rule['nbh_check_out']]))
elif random_type == 'direct_check':
rule['nbh_check_sum'] = np.random.randint(4)
if np.random.rand() < 0.5:
rule['nbh_check_colors'] = [get_random_all_color()]
else:
rule['nbh_check_colors'] = list(np.unique([get_random_all_color(), get_random_all_color()]))
rule['nbh_check_out'] = get_random_out_color()
rule['ignore_colors'] = list(np.unique(rule['ignore_colors'] + [rule['nbh_check_out']]))
elif random_type == 'indirect_check':
rule['nbh_check_sum'] = np.random.randint(4)
if np.random.rand() < 0.5:
rule['nbh_check_colors'] = [get_random_all_color()]
else:
rule['nbh_check_colors'] = list(np.unique([get_random_all_color(), get_random_all_color()]))
rule['nbh_check_out'] = get_random_out_color()
rule['ignore_colors'] = list(np.unique(rule['ignore_colors'] + [rule['nbh_check_out']]))
elif random_type == 'nbh_check':
rule['nbh_check_sum'] = np.random.randint(8)
if np.random.rand() < 0.5:
rule['nbh_check_colors'] = [get_random_all_color()]
else:
rule['nbh_check_colors'] = list(np.unique([get_random_all_color(), get_random_all_color()]))
rule['nbh_check_out'] = get_random_out_color()
rule['ignore_colors'] = list(np.unique(rule['ignore_colors'] + [rule['nbh_check_out']]))
elif random_type == 'color_distribution':
rule['direction'] = random.choice(
['top', 'bottom', 'left', 'right', 'top_left', 'bottom_left', 'top_right', 'bottom_right'])
        rule['check_in_empty'] = np.random.randint(2)
from collections.abc import Sequence
from functools import reduce
from typing import Any
import numpy as np
from mygrad.operation_base import Operation
__all__ = ["MaxMin", "Sum", "Mean", "Prod", "CumProd", "CumSum", "Variance", "StdDev"]
class MaxMin(Operation):
def __call__(self, a, axis=None, keepdims=False, maxmin=None):
""" Return the maximum (minimum) of a tensor, or along its axes.
Parameters
----------
a : pygrad.Tensor
Input data.
axis : Optional[int, Tuple[int, ...]]
Axis or axes along which to operate. By default, flattened input is used.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
maxmin : str
'max' or 'min'. Selects the operation that is performed
Returns
-------
amax : ndarray
Maximum (minimum) of `a`. If `axis` is None, the result is a 0-D array."""
assert maxmin in ("max", "min"), "Invalid keyword argument"
op = np.argmax if maxmin == "max" else np.argmin
# let numpy handle error checking
np.amax(np.empty([1] * a.ndim), axis=axis, keepdims=keepdims)
self.variables = (a,)
if a.ndim == 0:
return a.data
if hasattr(axis, "__iter__"):
assert isinstance(axis, tuple)
axis = tuple(ax % a.ndim for ax in axis)
axis = None if len(axis) == a.ndim else tuple(sorted(axis))
elif axis is not None:
axis = (axis % a.ndim,)
self.axis = axis
self.keepdims = keepdims
# max(a) -> use argmax
if self.axis is None:
self.indices = np.unravel_index(op(a.data), a.shape)
dat = a.data[self.indices]
# max(x, axis=i) -> use argmax with specified axis
elif len(self.axis) == 1: #
op_index = op(a.data, axis=self.axis[0])
            self.indices = list(np.indices(op_index.shape))
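# Hedged illustration (separate from the class above): the argmax/unravel_index pattern used in
# the axis=None branch, shown on a small standalone array.
#
#   >>> a = np.array([[1.0, 7.0], [3.0, 5.0]])
#   >>> idx = np.unravel_index(np.argmax(a), a.shape)   # (0, 1): row/column of the maximum
#   >>> a[idx]
#   7.0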
import numpy as np
import imageio
from skimage.feature import canny
import cv2
DEG = np.pi / 180.0
def rgb2gray(rgb):
return np.dot(rgb[..., :3], [0.299, 0.587, 0.114]).astype(np.uint8)
def fast_hough_line(img, angle_step=1, lines_are_white=True, value_threshold=5):
"""
hough line using vectorized numpy operations,
may take more memory, but takes much less time
Input:
img - 2D binary image with nonzeros representing edges
angle_step - Spacing between angles to use every n-th angle
between -90 and 90 degrees. Default step is 1.
lines_are_white - boolean indicating whether lines to be detected are white
value_threshold - Pixel values above or below the value_threshold are edges
Returns:
accumulator - 2D array of the hough transform accumulator
theta - array of angles used in computation, in radians.
rhos - array of rho values. Max size is 2 times the diagonal
distance of the input image.
"""
# Rho and Theta ranges
thetas = np.deg2rad(np.arange(-90.0, 90.0, angle_step)) #can be changed
#width, height = col.size #if we use pillow
width, height = img.shape
diag_len = int(np.ceil(np.sqrt(width * width + height * height))) # max_dist
rhos1 = np.linspace(-diag_len, diag_len, diag_len * 2)
# Cache some reusable values
cos_theta = np.cos(thetas)
sin_theta = np.sin(thetas)
num_thetas = len(thetas)
# Hough accumulator array of theta vs rho
accumulator = np.zeros((2 * diag_len, num_thetas))
are_edges = img > value_threshold if lines_are_white else img < value_threshold
#are_edges = cv2.Canny(img,50,150,apertureSize = 3)
y_idxs, x_idxs = np.nonzero(are_edges) # (row, col) indexes to edges
# Vote in the hough accumulator
xcosthetas = np.dot(x_idxs.reshape((-1,1)), cos_theta.reshape((1,-1)))
ysinthetas = np.dot(y_idxs.reshape((-1,1)), sin_theta.reshape((1,-1)))
rhosmat = np.round(xcosthetas + ysinthetas) + diag_len
rhosmat = rhosmat.astype(np.int16)
for i in range(num_thetas):
rhos, counts = np.unique(rhosmat[:,i], return_counts=True)
accumulator[rhos,i] = counts
return accumulator, thetas, rhos1, np.arange(0,2 * diag_len)
def hough_peaks(H, num_peaks, thetas, rhos):
H_values_sorted = np.sort(np.unique(H))[::-1]
H_values_num_peaks = H_values_sorted[:num_peaks]
peaks = []
for pi in H_values_num_peaks:
indexes = np.argwhere(H == pi)
for find_indexes in indexes:
rho = rhos[find_indexes[0]]
theta = thetas[find_indexes[1]]
peaks.append([rho, theta])
return np.array(peaks[0:num_peaks])
def peak_votes(accumulator, thetas, rhos):
""" Finds the max number of votes in the hough accumulator """
idx = np.argmax(accumulator)
rho = rhos[int(idx / accumulator.shape[1])]
theta = thetas[idx % accumulator.shape[1]]
return idx, theta, rho
def hough_lines_draw(image, peaks, rhos1):
x = np.arange(0, image.shape[0]-1)
H = np.zeros(image.shape)
for i in range(peaks.shape[0]):
d = rhos1[int(peaks[i, 0])]
theta = peaks[i, 1]
y = ((d - x*np.cos(theta))/np.sin(theta)).astype(int)
index = np.where(y<=image.shape[1]-1)[0]
y1 = y[index]
x1 = x[index]
index1 = np.where(y1>=0)[0]
y2 = y1[index1]
x2 = x1[index1]
H[x2, y2] = 1
return H
def show_hough_line(img, accumulator, lines_images, peaks, thetas, rhos, rhos1, save_path=None):
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 2, figsize=(10, 10))
ax[0].imshow(img+lines_images, cmap=plt.cm.gray)
ax[0].set_title('Input image')
ax[0].axis('image')
ax[1].imshow(accumulator, cmap='jet',
extent=[np.rad2deg(thetas[0]), np.rad2deg(thetas[-1]), rhos1[-1], rhos1[0]])
for ii in range(len(peaks[:, 0])):
ax[1].scatter(np.rad2deg(peaks[ii,1]), rhos1[int(peaks[ii, 0])], color='r')
ax[1].set_aspect('equal', adjustable='box')
ax[1].set_title('Hough transform')
ax[1].set_xlabel('Angles (degrees)')
ax[1].set_ylabel('Distance (pixels)')
ax[1].axis('image')
if save_path is not None:
plt.savefig(save_path, bbox_inches='tight')
plt.show()
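# Illustrative usage sketch: chain the helpers above on an edge image. The file
# name 'example.png', the canny sigma and the peak count are assumptions, not
# values from the original script.
def example_hough_pipeline(image_path='example.png', num_peaks=5):
    gray = rgb2gray(imageio.imread(image_path))  # assumes an RGB input image
    edges = canny(gray, sigma=2).astype(np.uint8) * 255
    accumulator, thetas, rhos1, rho_idx = fast_hough_line(edges)
    # hough_peaks expects the integer rho indices (4th return value)
    peaks = hough_peaks(accumulator, num_peaks, thetas, rho_idx)
    # peak_votes expects the actual rho values (3rd return value)
    idx, theta, rho = peak_votes(accumulator, thetas, rhos1)
    return peaks, theta, rho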
def ComputeGnomon_2(TwiceTheta_Chi, CenterProjection=(45 * DEG, 0 * DEG)):
data_theta = TwiceTheta_Chi[0] / 2.0
data_chi = TwiceTheta_Chi[1]
lat = np.arcsin(np.cos(data_theta * DEG) * np.cos(data_chi * DEG)) # in rads
longit = np.arctan(
-np.sin(data_chi * DEG) / np.tan(data_theta * DEG)) # + ones(len(data_chi))*(np.pi)
centerlat, centerlongit = CenterProjection
slat0 = np.ones(len(data_chi)) * np.sin(centerlat)
clat0 = np.ones(len(data_chi)) * np.cos(centerlat)
longit0 = np.ones(len(data_chi)) * centerlongit
slat = np.sin(lat)
clat = np.cos(lat)
cosanguldist = slat * slat0 + clat * clat0 * np.cos(longit - longit0)
_gnomonx = clat * np.sin(longit0 - longit) / cosanguldist
_gnomony = (slat * clat0 - clat * slat0 * np.cos(longit - longit0)) / cosanguldist
return _gnomonx, _gnomony
def computeGnomonicImage(TwiceTheta,Chi):
# CenterProjectionAngleTheta = 50#45
TwiceTheta_Chi = TwiceTheta,Chi
Xgno,Ygno = ComputeGnomon_2(TwiceTheta_Chi, CenterProjection=(45 * DEG, 0 * DEG))
pts =(np.array([Xgno,Ygno]).T)
nbpeaks=len(pts)
NbptsGno = 300
maxsize = max(Xgno.max(),Ygno.max(),-Xgno.min(),-Ygno.min())+.0
xgnomin,xgnomax,ygnomin,ygnomax=(-0.8,0.8,-0.5,0.5)
xgnomin,xgnomax,ygnomin,ygnomax=(-maxsize,maxsize,-maxsize,maxsize)
halfdiagonal = np.sqrt(xgnomax**2+ygnomax**2)*NbptsGno
XGNO = np.array((Xgno-xgnomin)/(xgnomax-xgnomin)*NbptsGno, dtype=int)
YGNO = np.array((Ygno-ygnomin)/(ygnomax-ygnomin)*NbptsGno, dtype=int)
imageGNO=np.zeros((NbptsGno+1,NbptsGno+1), dtype=int)
imageGNO[XGNO,YGNO]=1
return imageGNO, nbpeaks, halfdiagonal
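# Illustrative usage sketch: rasterize a few made-up (2theta, chi) spots onto
# the gnomonic plane; the angle values below are assumptions.
def example_gnomonic_image():
    twicetheta = np.array([80.0, 90.0, 100.0])
    chi = np.array([-10.0, 0.0, 10.0])
    image_gno, nbpeaks, halfdiagonal = computeGnomonicImage(twicetheta, chi)
    return image_gno, nbpeaks, halfdiagonal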
def InverseGnomon(_gnomonX, _gnomonY):
""" from x,y in gnomonic projection gives lat and long
return theta and chi of Q (direction of Q)
WARNING: assume that center of projection is centerlat, centerlongit = 45 deg, 0
"""
lat0 = np.ones(len(_gnomonX)) * np.pi / 4
longit0 = np.zeros(len(_gnomonX))
Rho = np.sqrt(_gnomonX ** 2 + _gnomonY ** 2) * 1.0
CC = np.arctan(Rho)
# the sign should be - !!
lalat = np.arcsin(np.cos(CC) * np.sin(lat0) + _gnomonY / Rho * np.sin(CC) * np.cos(lat0))
lonlongit = longit0 + np.arctan2(_gnomonX * np.sin(CC),
Rho * np.cos(lat0) * np.cos(CC) - _gnomonY * np.sin(lat0) * np.sin(CC))
Theta = np.arcsin(np.cos(lalat) * np.cos(lonlongit))
import matplotlib.pyplot as plt
import numpy as np
import math
import time
import sys
def input_coordinates(filename, showmap=False):
with open(filename, 'r') as fin:
X = []
Y = []
while True:
line = fin.readline()
if not line:
break
x, y = line.split(', ')
x, y = float(x), float(y)
X.append(x)
Y.append(y)
if showmap:
plt.scatter(X, Y)
return X, Y
def _coordinates_to_distance_table(coordinates):
distance_table = []
for x1, y1 in coordinates:
distance_list = []
for x2, y2 in coordinates:
distance_list.append(math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2))
distance_table.append(distance_list)
return distance_table
def calc_distance(path, X, Y):
distance = 0
i = 0
if isinstance(path, np.ndarray):
n_iter = path.size - 1
else:
n_iter = len(path) - 1
while i < n_iter:
present_idx = path[i]
next_idx = path[i + 1]
distance += math.sqrt((X[present_idx] - X[next_idx]) ** 2
+ (Y[present_idx] - Y[next_idx]) ** 2)
i += 1
# close the tour: connect the last city in the path back to the first one
distance += math.sqrt((X[path[0]] - X[path[-1]]) ** 2
+ (Y[path[0]] - Y[path[-1]]) ** 2)
return distance
def _prob_exec(prob):
if np.random.rand() <= prob:
return True
else:
return False
def random_path(X, Y):
if len(X) != len(Y):
sys.stderr.write('X and Y are not the same length')
n = len(X)
path = np.random.permutation(n)
return path
def _metropolis(path1, X, Y, T):
distance1 = calc_distance(path1, X, Y)
path2 = np.copy(path1)
n = path1.size
swap_cities_idx = np.random.randint(0, n, size=2)
path2[swap_cities_idx[0]], path2[swap_cities_idx[1]] = \
path2[swap_cities_idx[1]], path2[swap_cities_idx[0]]
distance2 = calc_distance(path2, X, Y)
if distance2 < distance1:
return path2, distance2
delta = distance2 - distance1
prob = math.exp(- delta / T)
if _prob_exec(prob):
return path2, distance2
else:
return path1, distance1
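# Illustrative sketch of how the helpers above fit together: a plain simulated
# annealing loop with a geometric cooling schedule. The temperature bounds,
# cooling rate and sweep count are assumptions, not tuned values.
def simulated_annealing_tsp(X, Y, t_start=100.0, t_end=1e-3, cooling=0.95, sweeps_per_temp=100):
    path = random_path(X, Y)
    dist = calc_distance(path, X, Y)
    T = t_start
    while T > t_end:
        for _ in range(sweeps_per_temp):
            path, dist = _metropolis(path, X, Y, T)
        T *= cooling  # geometric cooling
    return path, dist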
def greedy_tsp(X, Y):
coordinates = list(zip(X, Y))
distance_table = _coordinates_to_distance_table(coordinates)
distance_table = np.array(distance_table)
num_of_cities = len(distance_table)
city = np.random.randint(0, num_of_cities)
path = np.array([], dtype='int8')
bin_path = np.ones([num_of_cities], dtype=bool)
falses = np.zeros([num_of_cities], dtype=bool)
for i in range(num_of_cities):
path = np.append(path, city)
# Copyright (C) 2021, Mindee.
# This program is licensed under the Apache License version 2.
# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
# Credits: post-processing adapted from https://github.com/xuannianz/DifferentiableBinarization
import cv2
import numpy as np
from shapely.geometry import Polygon
import pyclipper
from typing import Union, List, Tuple, Optional
from doctr.utils.geometry import fit_rbbox, rbbox_to_polygon
from doctr.utils.common_types import RotatedBbox
from ..core import DetectionPostProcessor
__all__ = ['DBPostProcessor']
class DBPostProcessor(DetectionPostProcessor):
"""Implements a post processor for DBNet adapted from the implementation of `xuannianz
<https://github.com/xuannianz/DifferentiableBinarization>`_.
Args:
unclip ratio: ratio used to unshrink polygons
min_size_box: minimal length (pix) to keep a box
max_candidates: maximum boxes to consider in a single page
box_thresh: minimal objectness score to consider a box
bin_thresh: threshold used to binarize p_map at inference time
"""
def __init__(
self,
box_thresh: float = 0.1,
bin_thresh: float = 0.3,
rotated_bbox: bool = False,
) -> None:
super().__init__(
box_thresh,
bin_thresh,
rotated_bbox
)
self.unclip_ratio = 2.2 if self.rotated_bbox else 1.5
def polygon_to_box(
self,
points: np.ndarray,
) -> Optional[Union[RotatedBbox, Tuple[float, float, float, float]]]:
"""Expand a polygon (points) by a factor unclip_ratio, and returns a rotated box: x, y, w, h, alpha
Args:
points: The first parameter.
Returns:
a box in absolute coordinates (xmin, ymin, xmax, ymax) or (x, y, w, h, alpha)
"""
poly = Polygon(points)
distance = poly.area * self.unclip_ratio / poly.length # compute distance to expand polygon
offset = pyclipper.PyclipperOffset()
offset.AddPath(points, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
_points = offset.Execute(distance)
# Take biggest stack of points
idx = 0
if len(_points) > 1:
max_size = 0
for _idx, p in enumerate(_points):
if len(p) > max_size:
idx = _idx
max_size = len(p)
# We ensure that _points can be correctly cast to a ndarray
_points = [_points[idx]]
expanded_points = np.asarray(_points) # expand polygon
if len(expanded_points) < 1:
return None
return fit_rbbox(expanded_points) if self.rotated_bbox else cv2.boundingRect(expanded_points)
def bitmap_to_boxes(
self,
pred: np.ndarray,
bitmap: np.ndarray,
) -> np.ndarray:
"""Compute boxes from a bitmap/pred_map
Args:
pred: Pred map from differentiable binarization output
bitmap: Bitmap map computed from pred (binarized)
Returns:
np tensor boxes for the bitmap, each box is a 5-element list
containing x, y, w, h, score for the box
"""
height, width = bitmap.shape[:2]
min_size_box = 1 + int(height / 512)
boxes = []
# get contours from connected components on the bitmap
contours, _ = cv2.findContours(bitmap.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for contour in contours:
# Check whether smallest enclosing bounding box is not too small
if np.any(contour[:, 0].max(axis=0) - contour[:, 0].min(axis=0) < min_size_box):
continue
# Compute objectness
if self.rotated_bbox:
score = self.box_score(pred, contour, rotated_bbox=True)
else:
x, y, w, h = cv2.boundingRect(contour)
points = np.array([[x, y], [x, y + h], [x + w, y + h], [x + w, y]])
score = self.box_score(pred, points, rotated_bbox=False)
if self.box_thresh > score: # remove polygons with a weak objectness
continue
_box = self.polygon_to_box(np.squeeze(contour)) if self.rotated_bbox else self.polygon_to_box(points)
if _box is None or _box[2] < min_size_box or _box[3] < min_size_box:  # remove too-small boxes
continue
if self.rotated_bbox:
x, y, w, h, alpha = _box # type: ignore[misc]
# compute relative box to get rid of img shape
x, y, w, h = x / width, y / height, w / width, h / height
boxes.append([x, y, w, h, alpha, score])
else:
x, y, w, h = _box # type: ignore[misc]
# compute relative polygon to get rid of img shape
xmin, ymin, xmax, ymax = x / width, y / height, (x + w) / width, (y + h) / height
boxes.append([xmin, ymin, xmax, ymax, score])
if self.rotated_bbox:
if len(boxes) == 0:
return np.zeros((0, 6), dtype=pred.dtype)
coord = np.clip(np.asarray(boxes)[:, :4], 0, 1) # clip boxes coordinates
boxes = np.concatenate((coord, np.asarray(boxes)[:, 4:]), axis=1)
return boxes
else:
return np.clip(np.asarray(boxes), 0, 1) if len(boxes) > 0 else np.zeros((0, 5), dtype=pred.dtype)
class _DBNet:
"""DBNet as described in `"Real-time Scene Text Detection with Differentiable Binarization"
<https://arxiv.org/pdf/1911.08947.pdf>`_.
Args:
feature extractor: the backbone serving as feature extractor
fpn_channels: number of channels each extracted feature maps is mapped to
"""
shrink_ratio = 0.4
thresh_min = 0.3
thresh_max = 0.7
min_size_box = 3
rotated_bbox: bool = False
@staticmethod
def compute_distance(
xs: np.array,
ys: np.array,
a: np.array,
b: np.array,
eps: float = 1e-7,
) -> float:
"""Compute the distance for each point of the map (xs, ys) to the (a, b) segment
Args:
xs : map of x coordinates (height, width)
ys : map of y coordinates (height, width)
a: first point defining the [ab] segment
b: second point defining the [ab] segment
Returns:
The computed distance
"""
square_dist_1 = np.square(xs - a[0]) + np.square(ys - a[1])
square_dist_2 = np.square(xs - b[0]) + np.square(ys - b[1])
square_dist = np.square(a[0] - b[0]) + np.square(a[1] - b[1])
cosin = (square_dist - square_dist_1 - square_dist_2) / (2 * np.sqrt(square_dist_1 * square_dist_2) + eps)
square_sin = 1 - np.square(cosin)
square_sin = np.nan_to_num(square_sin)
result = np.sqrt(square_dist_1 * square_dist_2 * square_sin / square_dist)
result[cosin < 0] = np.sqrt(np.fmin(square_dist_1, square_dist_2))[cosin < 0]
return result
def draw_thresh_map(
self,
polygon: np.array,
canvas: np.array,
mask: np.array,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Draw a polygon treshold map on a canvas, as described in the DB paper
Args:
polygon : array of coord., to draw the boundary of the polygon
canvas : threshold map to fill with polygons
mask : mask for training on threshold polygons
"""
if polygon.ndim != 2 or polygon.shape[1] != 2:
raise AttributeError("polygon should be a 2 dimensional array of coords")
# Augment polygon by shrink_ratio
polygon_shape = Polygon(polygon)
distance = polygon_shape.area * (1 - np.power(self.shrink_ratio, 2)) / polygon_shape.length
subject = [tuple(coor) for coor in polygon] # Get coord as list of tuples
padding = pyclipper.PyclipperOffset()
padding.AddPath(subject, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
padded_polygon = np.array(padding.Execute(distance)[0])
# Fill the mask with 1 on the new padded polygon
cv2.fillPoly(mask, [padded_polygon.astype(np.int32)], 1.0)
# Get min/max to recover polygon after distance computation
xmin = padded_polygon[:, 0].min()
xmax = padded_polygon[:, 0].max()
ymin = padded_polygon[:, 1].min()
ymax = padded_polygon[:, 1].max()
width = xmax - xmin + 1
height = ymax - ymin + 1
# Get absolute polygon for distance computation
polygon[:, 0] = polygon[:, 0] - xmin
polygon[:, 1] = polygon[:, 1] - ymin
# Get absolute padded polygon
xs = np.broadcast_to(np.linspace(0, width - 1, num=width).reshape(1, width), (height, width))
ys = np.broadcast_to(np.linspace(0, height - 1, num=height).reshape(height, 1), (height, width))
# Compute distance map to fill the padded polygon
distance_map = np.zeros((polygon.shape[0], height, width), dtype=polygon.dtype)
for i in range(polygon.shape[0]):
j = (i + 1) % polygon.shape[0]
absolute_distance = self.compute_distance(xs, ys, polygon[i], polygon[j])
distance_map[i] = np.clip(absolute_distance / distance, 0, 1)
distance_map = np.min(distance_map, axis=0)
# Clip the padded polygon inside the canvas
xmin_valid = min(max(0, xmin), canvas.shape[1] - 1)
xmax_valid = min(max(0, xmax), canvas.shape[1] - 1)
ymin_valid = min(max(0, ymin), canvas.shape[0] - 1)
ymax_valid = min(max(0, ymax), canvas.shape[0] - 1)
# Fill the canvas with the distances computed inside the valid padded polygon
canvas[ymin_valid:ymax_valid + 1, xmin_valid:xmax_valid + 1] = np.fmax(
1 - distance_map[
ymin_valid - ymin:ymax_valid - ymin + 1,
xmin_valid - xmin:xmax_valid - xmin + 1
],
canvas[ymin_valid:ymax_valid + 1, xmin_valid:xmax_valid + 1]
)
return polygon, canvas, mask
def compute_target(
self,
target: List[np.ndarray],
output_shape: Tuple[int, int, int],
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
if any(t.dtype not in (np.float32, np.float16) for t in target):
raise AssertionError("the expected dtype of target 'boxes' entry is either 'np.float32' or 'np.float16'.")
if any(np.any((t[:, :4] > 1) | (t[:, :4] < 0)) for t in target):
raise ValueError("the 'boxes' entry of the target is expected to take values between 0 & 1.")
input_dtype = target[0].dtype if len(target) > 0 else np.float32
seg_target = np.zeros(output_shape, dtype=np.uint8)
seg_mask = np.ones(output_shape, dtype=bool)
thresh_target = np.zeros(output_shape, dtype=np.uint8)
thresh_mask = np.ones(output_shape, dtype=np.uint8)
for idx, _target in enumerate(target):
# Draw each polygon on gt
if _target.shape[0] == 0:
# Empty image, full masked
seg_mask[idx] = False
# Absolute bounding boxes
abs_boxes = _target.copy()
abs_boxes[:, [0, 2]] *= output_shape[-1]
abs_boxes[:, [1, 3]] *= output_shape[-2]
abs_boxes = abs_boxes.round().astype(np.int32)
if abs_boxes.shape[1] == 5:
boxes_size = np.minimum(abs_boxes[:, 2], abs_boxes[:, 3])
polys = np.stack([
rbbox_to_polygon(tuple(rbbox)) for rbbox in abs_boxes # type: ignore[arg-type]
], axis=1)
else:
boxes_size = np.minimum(abs_boxes[:, 2] - abs_boxes[:, 0], abs_boxes[:, 3] - abs_boxes[:, 1])
from skimage.feature import canny
import scipy.ndimage as ndimage
import cv2
import gdal
import numpy as np
from skimage.morphology import skeletonize
from skimage import measure
from skimage.measure import regionprops
from multiprocessing import pool, cpu_count
from multiprocessing.dummy import Pool as ThreadPool
#define variables
featureList=[]
maxArea=0
xy=[]
areaList = []
indexList =[]
maskBB = []
maskHeight = []
maskLabel = []
maskStd = []
neighbours=0
regionbb=0
mask=0
regionval=0
smallerThan=0
kernel = np.ones((20,20),np.uint8)
def findStdSmaller(regionIndex):
global smallerThan
pos = np.where(maskLabel==neighbours[regionIndex])
mask2 = (regionbb == maskLabel[pos]).astype(np.uint8)*255
mask2 = cv2.dilate(mask2,kernel,iterations = 1)>0
mask2 = np.multiply(mask2,mask>0)
mask2 = np.multiply(mask2,regionval)
hData = mask2[np.where(mask2>0)]
if len(hData)>0:
h = np.mean(hData)
if h<maskHeight[pos]+2:
smallerThan=smallerThan+1
def findDEMFeature(original_dem,index):
global featureList,maxArea,xy,areaList,indexList,maskBB,maskHeight,maskLabel,maskStd,\
neighbours,regionbb,mask,regionval,smallerThan,kernel
height,width=original_dem.shape
region = regionprops(index, original_dem,cache = True)
number_regions=len(region)
for i in range(0,number_regions):
if region[i].area>10000:
areaList.append(region[i].area)
indexList.append(i)
maskBB.append(region[i].bbox)
maskLabel.append(region[i].label)
maskHeight.append(region[i].mean_intensity)
xy = region[i].coords
std = np.std(original_dem[xy[:,0],xy[:,1]])
maskStd.append(std)
areaList = np.array(areaList)
indexList = np.array(indexList)
maskBB = np.array(maskBB)
maskHeight = np.array(maskHeight)
maskLabel = np.array(maskLabel)
maskStd = np.array(maskStd)
order = np.argsort(-areaList)
#!/usr/bin/env python3
"""
Machine learning for PDF shapes
"""
# ========================================================================
#
# Imports
#
# ========================================================================
import os
import time
import datetime
import numpy as np
import pickle
import pandas as pd
from scipy import stats
from scipy import signal
from scipy import ndimage
from scipy.stats.kde import gaussian_kde
from scipy.spatial import distance
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression, BayesianRidge
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import Matern, WhiteKernel
from sklearn.svm import SVR
from sklearn.multioutput import MultiOutputRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import Pipeline
from sklearn.cluster import KMeans
from sklearn.externals import joblib
from imblearn.over_sampling import RandomOverSampler
import torch
from torch import nn, optim
from torch.autograd import Variable
import torchvision.utils as vutils
from tensorboardX import SummaryWriter
import utilities as utilities
import lrp
# ===============================================================================
#
# Some defaults variables
#
# ===============================================================================
cmap = [
"#EE2E2F",
"#008C48",
"#185AA9",
"#F47D23",
"#662C91",
"#A21D21",
"#B43894",
"#010202",
]
dashseq = [
(None, None),
[10, 5],
[10, 4, 3, 4],
[3, 3],
[10, 4, 3, 4, 3, 4],
[3, 3],
[3, 3],
[3, 3],
[3, 3],
]
markertype = ["s", "d", "o", "p", "h"]
# ========================================================================
#
# Function definitions
#
# ========================================================================
def load_dice(fname):
"""
Load the data and get a training ready data frame
"""
dat = np.load(fname)
df = pd.DataFrame(
{
"Z": dat["Z"].flatten(),
"Z4": dat["Z4"].flatten(),
"Z8": dat["Z8"].flatten(),
"Z16": dat["Z16"].flatten(),
"Z32": dat["Z32"].flatten(),
"C": dat["C"].flatten(),
"C4": dat["C4"].flatten(),
"C8": dat["C8"].flatten(),
"C16": dat["C16"].flatten(),
"C32": dat["C32"].flatten(),
"SRC_PV": dat["SRC_PV"].flatten(),
"rhoSRC_PV": (dat["Rho"] * dat["SRC_PV"]).flatten(),
"SRC_PV4": dat["SRC_PV4"].flatten(),
"SRC_PV8": dat["SRC_PV8"].flatten(),
"SRC_PV16": dat["SRC_PV16"].flatten(),
"SRC_PV32": dat["SRC_PV32"].flatten(),
"Zvar4": dat["Zvar4"].flatten(),
"Zvar8": dat["Zvar8"].flatten(),
"Zvar16": dat["Zvar16"].flatten(),
"Zvar32": dat["Zvar32"].flatten(),
"Cvar4": dat["Cvar4"].flatten(),
"Cvar8": dat["Cvar8"].flatten(),
"Cvar16": dat["Cvar16"].flatten(),
"Cvar32": dat["Cvar32"].flatten(),
}
)
# Clip variables
df.Z = np.clip(df.Z, 0.0, 1.0)
df.Z4 = np.clip(df.Z4, 0.0, 1.0)
df.Z8 = np.clip(df.Z8, 0.0, 1.0)
df.Z16 = np.clip(df.Z16, 0.0, 1.0)
df.Z32 = np.clip(df.Z32, 0.0, 1.0)
df.C = np.clip(df.C, 0.0, None)
df.C4 = np.clip(df.C4, 0.0, None)
df.C8 = np.clip(df.C8, 0.0, None)
df.C16 = np.clip(df.C16, 0.0, None)
df.C32 = np.clip(df.C32, 0.0, None)
return dat, df
# ========================================================================
def gen_training(df, oname="training"):
"""
Generate scaled training, dev, test arrays
"""
x_vars = get_xnames()
y_vars = get_ynames(df)
return utilities.gen_training(df, x_vars, y_vars, oname)
# ========================================================================
def get_xnames():
return ["C", "Cvar", "Z", "Zvar"]
# ========================================================================
def get_ynames(df):
return [col for col in df if col.startswith("Y")]
# ========================================================================
def closest_point(point, points):
"""Find index of closest point"""
closest_index = distance.cdist([point], np.asarray(points)).argmin()
if isinstance(points, pd.DataFrame):
return points.iloc[closest_index, :]
else:
return points[closest_index, :]
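# ========================================================================
# Quick illustrative check (the toy coordinates are made up): the candidate
# nearest to the query point is returned.
def _example_closest_point():
    candidates = np.array([[1.0, 1.0], [0.1, -0.2], [3.0, 0.0]])
    return closest_point([0.0, 0.0], candidates)  # -> array([ 0.1, -0.2])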
# ========================================================================
def wide_to_narrow(X, Y, bins):
"""
Convert data from predicting a Y(Zbin,Cbin) as a vector to
individual predictions of Y(Zbin,Cbin) given a Zbin and Cbin label
in the input data.
"""
varname = "variable"
valname = "Y"
x_vars = get_xnames()
dev = pd.concat([X, Y], axis=1)
left = pd.melt(
dev.reset_index(),
id_vars=x_vars + ["index"],
value_vars=Y.columns,
var_name=varname,
value_name=valname,
)
right = pd.concat([bins, pd.DataFrame(Y.columns, columns=[varname])], axis=1)
narrow = pd.merge(left, right, on=[varname]).set_index(["index", varname])
narrow = narrow.reindex(X.index, level="index")
return narrow.drop(columns=[valname]), narrow[valname]
# ========================================================================
def narrow_to_wide(Xn, Yn, idx=None):
"""
Reverse of wide_to_narrow
"""
varname = "variable"
valname = "Y"
x_vars = get_xnames()
bin_names = ["Zbins", "Cbins"]
narrow = pd.concat([Xn, Yn], axis=1).drop(columns=bin_names)
wide = narrow.reset_index().pivot(
index="index", columns="variable", values=x_vars + [valname]
)
# Get X
X = wide[x_vars].stack().xs("Y0000", level=varname)
X.index.name = None
# Get Y
Y = wide[valname]
Y.columns.name = None
Y.index.name = None
# Sort according to original wide
if idx is not None:
X = X.reindex(idx)
Y = Y.reindex(idx)
return X, Y
# ========================================================================
def fix_imbalance(df, n_clusters=10):
"""
Fix an imbalanced data set by over sampling minority classes
"""
x_vars = get_xnames()
classes = KMeans(n_clusters=n_clusters, random_state=0).fit_predict(df[x_vars])
ros = RandomOverSampler(random_state=0)
X_resampled, classes_resampled = ros.fit_sample(df, classes)
return pd.DataFrame(X_resampled, columns=df.columns)
# ========================================================================
def deprecated_gen_conditional_means_dice(df, zbin_edges, cbin_edges, oname):
"""
Generate the conditional means for a dataframe
"""
means, _, _, nbins = stats.binned_statistic_2d(
df.Z, df.C, df.rhoSRC_PV, statistic="mean", bins=[zbin_edges, cbin_edges]
)
# Plot
ma_means = np.ma.array(means, mask=np.isnan(means))
cm = matplotlib.cm.viridis
cm.set_bad("white", 1.0)
plt.figure(0)
plt.clf()
im = plt.imshow(
ma_means.T,
extent=[
np.min(zbin_edges),
np.max(zbin_edges),
np.min(cbin_edges),
np.max(cbin_edges),
],
origin="lower",
aspect="auto",
cmap=cm,
)
plt.colorbar(im)
plt.xlabel("Mixture Fraction")
plt.ylabel("Progress Variable")
plt.title("Conditional means")
plt.tight_layout()
plt.savefig(oname + ".png", format="png", dpi=300, bbox_inches="tight")
# Fix nans
means[np.isnan(means)] = 0.0
# Save for later
np.savez_compressed(
oname + ".npz",
means=means,
zbin_edges=zbin_edges,
cbin_edges=cbin_edges,
nbins=nbins,
)
return means
# ========================================================================
def jensen_shannon_divergence(p, q):
"""
This will be part of scipy as some point.
See https://github.com/scipy/scipy/pull/8295
We use this implementation for now: https://stackoverflow.com/questions/15880133/jensen-shannon-divergence
:param p: PDF (normalized to 1)
:type p: array
:param q: PDF (normalized to 1)
:type q: array
"""
eps = 1e-13
M = np.clip(0.5 * (p + q), eps, None)
return 0.5 * (stats.entropy(p, M) + stats.entropy(q, M))
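# ========================================================================
# Illustrative sanity check (the two toy PDFs are assumptions): the divergence
# is ~0 for identical PDFs and ~ln(2) for PDFs with disjoint support.
def _example_jsd():
    p = np.array([0.5, 0.5, 0.0])
    q = np.array([0.0, 0.0, 1.0])
    return jensen_shannon_divergence(p, p), jensen_shannon_divergence(p, q)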
# ========================================================================
def jensen_shannon_distance(p, q):
"""
Jensen-Shannon distance
:param p: PDF (normalized to 1)
:type p: array
:param q: PDF (normalized to 1)
:type q: array
"""
return np.sqrt(jensen_shannon_divergence(p, q))
# ========================================================================
def calculate_jsd(y, yp):
"""
Calculate the JSD metric on each PDF prediction
"""
y = np.asarray(y, dtype=np.float64)
yp = np.asarray(yp, dtype=np.float64)
return np.array(
[jensen_shannon_divergence(y[i, :], yp[i, :]) for i in range(y.shape[0])]
)
# ========================================================================
def pdf_distances(base, datadir="data"):
"""
Compute the minimum JSD between all PDFs in base dice with all the
other dice PDFs.
Only do this with PDFs in the dev set as this is an expensive operation.
:param base: baseline dice
:type base: str
:param datadir: data directory
:type datadir: str
:return: minimum distances
:rtype: dataframe
"""
others = [
"dice_0002",
"dice_0003",
"dice_0004",
"dice_0005",
"dice_0006",
"dice_0007",
"dice_0008",
"dice_0009",
"dice_0010",
]
try:
others.remove(base)
except ValueError:
pass
# Read the baseline PDFs
Ydev_base = pd.read_pickle(os.path.join(datadir, f"{base}_ydev.gz"))
# Compute all the distances and keep the minimum for each baseline sample
distances = {}
for k, other in enumerate(others):
# Get pairwise distance matrix
Ydev_other = pd.read_pickle(os.path.join(datadir, f"{other}_ydev.gz"))
d = distance.cdist(Ydev_base, Ydev_other, jensen_shannon_divergence)
# Find the minimum distance from other to base
idx = d.argmin(axis=0)
distances[other] = pd.DataFrame(index=Ydev_other.index)
distances[other]["r"] = d[idx, np.arange(0, Ydev_other.shape[0])]
distances[other]["idx"] = Ydev_base.index[idx]
# Save
with open(os.path.join(datadir, f"{base}_pdf_distances.pkl"), "wb") as f:
pickle.dump(distances, f, pickle.HIGHEST_PROTOCOL)
return distances
# ========================================================================
def clip_normalize(y):
"""
Clip and normalize (along axis=1)
"""
y = np.clip(y, 0, 1)
return y / np.sum(y, axis=1, keepdims=True)
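# ========================================================================
# Illustrative example (the toy rows are assumptions): negative entries are
# clipped to zero and each row is renormalized to sum to one.
def _example_clip_normalize():
    raw = np.array([[0.2, -0.1, 0.6], [0.5, 0.5, 0.5]])
    return clip_normalize(raw)  # rows -> [0.25, 0.0, 0.75] and [1/3, 1/3, 1/3]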
# ========================================================================
def rmse_metric(true, predicted):
return np.sqrt(mean_squared_error(true, predicted))
# ========================================================================
def error_metrics(true, predicted, verbose=False):
"""
Compute some error metrics
"""
rmse = rmse_metric(true, predicted)
mae = mean_absolute_error(true, predicted)
r2 = r2_score(true, predicted)
if verbose:
print(f"RMSE: {rmse:.3f}")
print(f"MAE: {mae:.3f}")
print(f"R2: {r2:.3f}")
return rmse, mae, r2
# ========================================================================
def src_pv_normalization(
dices=[
"dice_0002",
"dice_0003",
"dice_0004",
"dice_0005",
"dice_0006",
"dice_0007",
"dice_0008",
"dice_0009",
"dice_0010",
],
datadir="data",
):
"""Compute the normalization constant"""
src_pv_sum = 0.0
count = 0
for dice in dices:
pdf = pd.read_pickle(os.path.join(datadir, f"{dice}_pdfs.gz"))
src_pv_sum += np.sum(pdf.SRC_PV ** 2)
count += pdf.SRC_PV.shape[0]
return np.sqrt(src_pv_sum / count)
# ========================================================================
def convolution_means(pdf, means):
"""
Perform the PDF convolution given means
means can be one for each PDF or means_dice.flatten(order='F')
:param pdf: predictions from model (model.predict(X))
:type pdf: array
:param means: conditional means
:type means: array
"""
return np.sum(means * pdf, axis=1)
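# ========================================================================
# Illustrative example (the toy numbers are assumptions): with two PDFs over
# three bins and one conditional mean per bin, the convolution is a row-wise
# dot product, giving one convolved value per PDF.
def _example_convolution_means():
    pdf = np.array([[0.2, 0.3, 0.5], [1.0, 0.0, 0.0]])
    means = np.array([1.0, 2.0, 4.0])
    return convolution_means(pdf, means)  # -> array([2.8, 1. ])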
# ========================================================================
def create_logdir(model_name):
"""Create a log directory for a model"""
time = datetime.datetime.now().strftime("%b%d_%H-%M-%S")
logdir = os.path.abspath(os.path.join("runs", f"{time}_{model_name}"))
if not os.path.exists(logdir):
os.makedirs(logdir)
return logdir
# ========================================================================
def lr_training(Xtrain, Xdev, Ytrain, Ydev):
"""
Train using a Linear Regression
"""
model_name = "LR"
logdir = create_logdir(model_name)
# Training
LR = LinearRegression().fit(Xtrain, Ytrain)
joblib.dump(LR, os.path.join(logdir, model_name + ".pkl"))
mtrain = clip_normalize(LR.predict(Xtrain))
mdev = clip_normalize(LR.predict(Xdev))
# Summarize training
summarize_training(
Ytrain, mtrain, Ydev, mdev, os.path.join(logdir, model_name + ".log")
)
return mtrain, mdev, LR
# ========================================================================
def br_training(Xtrain, Xdev, Ytrain, Ydev):
"""
Train using a Bayesian ridge regression
"""
model_name = "BR"
logdir = create_logdir(model_name)
# Training
BR = MultiOutputRegressor(BayesianRidge()).fit(Xtrain, Ytrain)
joblib.dump(BR, os.path.join(logdir, model_name + ".pkl"))
mtrain = BR.predict(Xtrain)
mdev = BR.predict(Xdev)
# Summarize training
summarize_training(
Ytrain, mtrain, Ydev, mdev, os.path.join(logdir, model_name + ".log")
)
return mtrain, mdev, BR
# ========================================================================
def pr_training(Xtrain, Xdev, Ytrain, Ydev, order=6):
"""
Train using a polynomial regression
"""
model_name = f"PR{order}"
logdir = create_logdir(model_name)
# Training
PR = Pipeline(
[
("poly", PolynomialFeatures(degree=order)),
("linear", LinearRegression(fit_intercept=False)),
]
)
PR = PR.fit(Xtrain, Ytrain)
joblib.dump(PR, os.path.join(logdir, model_name + ".pkl"))
mtrain = clip_normalize(PR.predict(Xtrain))
mdev = clip_normalize(PR.predict(Xdev))
# Summarize training
summarize_training(
Ytrain, mtrain, Ydev, mdev, os.path.join(logdir, model_name + ".log")
)
return mtrain, mdev, PR
# ========================================================================
def svr_training(Xtrain, Xdev, Ytrain, Ydev):
"""
Train using a support vector regression
"""
model_name = "SVR"
logdir = create_logdir(model_name)
# Training
svr = MultiOutputRegressor(SVR(kernel="rbf", epsilon=1e-3))
grid_param_svr = {
"estimator__C": [1e0, 1e1, 1e2, 1e3],
"estimator__gamma": np.logspace(-2, 2, 5),
}
SR = GridSearchCV(estimator=svr, param_grid=grid_param_svr, cv=5, n_jobs=-1).fit(
Xtrain, Ytrain
)
print("Best estimator and parameter set found on training set:")
print(SR.best_estimator_)
print(SR.best_params_)
joblib.dump(SR, os.path.join(logdir, model_name + ".pkl"))
mtrain = SR.predict(Xtrain)
mdev = SR.predict(Xdev)
# Summarize training
summarize_training(
Ytrain, mtrain, Ydev, mdev, os.path.join(logdir, model_name + ".log")
)
return mtrain, mdev, SR
# ========================================================================
def gp_training(Xtrain, Xdev, Ytrain, Ydev):
"""
Train using a gaussian process regression
"""
model_name = "GP"
logdir = create_logdir(model_name)
# Training
kernel = 6.2 ** 2 * Matern(
length_scale=[1, 1, 1, 1], length_scale_bounds=(1e-1, 1e4), nu=1.5
) + WhiteKernel(noise_level=2, noise_level_bounds=(1e-1, 3e0))
GP = GaussianProcessRegressor(
kernel=kernel, alpha=0, n_restarts_optimizer=3, normalize_y=True
).fit(Xtrain, Ytrain)
print("Trained GP kernel:", GP.kernel_)
joblib.dump(GP, os.path.join(logdir, model_name + ".pkl"))
mtrain = GP.predict(Xtrain)
mdev = GP.predict(Xdev)
# Summarize training
summarize_training(
Ytrain, mtrain, Ydev, mdev, os.path.join(logdir, model_name + ".log")
)
return mtrain, mdev, GP
# ========================================================================
def count_rf_parameters(model):
return np.sum([t.tree_.node_count for t in model.estimators_])
# ========================================================================
def rf_training(Xtrain, Xdev, Ytrain, Ydev, nestim=100, max_depth=30):
"""
Train using a Random Forest Regression
"""
# Setup
model_name = "RF"
logdir = create_logdir(model_name)
np.random.seed(985_721)
# Training
start = time.time()
RF = RandomForestRegressor(n_estimators=nestim, max_depth=max_depth, n_jobs=1).fit(
Xtrain, Ytrain
)
end = time.time() - start
joblib.dump(RF, os.path.join(logdir, model_name + ".pkl"))
print("Trained RandomForest")
print(" Feature importance", RF.feature_importances_)
mtrain = RF.predict(Xtrain)
mdev = RF.predict(Xdev)
# Summarize training
summarize_training(
Ytrain,
mtrain,
Ydev,
mdev,
fname=os.path.join(logdir, model_name + ".log"),
timing=end,
dofs=count_rf_parameters(RF),
)
return mtrain, mdev, RF
# ========================================================================
def betaPDF(mean, var, centers, eps=1e-6):
"""
Calculate beta PDF
:param mean: mean
:type mean: float
:param var: variance
:type var: float
:param centers: bin centers
:type centers: array
:param eps: smallness threshold
:type eps: float
:return: pdf
:rtype: array
"""
pdf = np.zeros(centers.shape)
if var < eps:
if mean > np.max(centers):
pdf[-1] = 1.0
return pdf
else:
idx = np.argmax(centers > mean)
if (idx == 0) or (idx == len(pdf) - 1):
pdf[idx] = 1.0
return pdf
else:
pdf[idx - 1] = (centers[idx] - mean) / (centers[idx] - centers[idx - 1])
pdf[idx] = (mean - centers[idx - 1]) / (centers[idx] - centers[idx - 1])
return pdf
elif var > mean * (1.0 - mean):
pdf[0] = 1.0 - mean
pdf[-1] = mean
return pdf
else:
a = mean * (mean * (1.0 - mean) / var - 1.0)
b = a / mean - a
ni = 1024
x = np.linspace(0, 1, ni)
pdf = np.interp(centers, x, stats.beta.pdf(x, a, b))
pdf /= np.sum(pdf)
return pdf
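# ========================================================================
# Illustrative usage (the mean, variance and bin count are assumptions): a
# discretized beta PDF evaluated on uniform bin centers; the result sums to ~1.
def _example_beta_pdf():
    edges = np.linspace(0.0, 1.0, 33)
    centers = 0.5 * (edges[:-1] + edges[1:])
    return betaPDF(0.2, 0.01, centers)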
# ========================================================================
class AnalyticalPDFModel:
"""Generic analytical PDF model"""
def __init__(self, zbin_edges, cbin_edges):
"""
:param zbin_edges: bin edges for Z
:type bins: array
:param cbin_edges: bin edges for C
:type bins: array
"""
self.zbin_edges = zbin_edges
self.cbin_edges = cbin_edges
self.eps = 1e-13
self.cscale = cbin_edges[-1]
self.nc = len(cbin_edges) - 1
self.nz = len(zbin_edges) - 1
self.cbin_centers = utilities.edges_to_midpoint(cbin_edges)
self.zbin_centers = utilities.edges_to_midpoint(zbin_edges)
self.cbin_widths = np.diff(cbin_edges)
self.zbin_widths = np.diff(zbin_edges)
self.seed = 9_023_457
# ========================================================================
class DD(AnalyticalPDFModel):
"""
delta(Z) - delta(C) PDF
"""
def __init__(self, zbin_edges, cbin_edges):
super().__init__(zbin_edges, cbin_edges)
def predict(self, X):
"""
:param X: conditional variables
:type X: dataframe
:return: PDFs
:rtype: array
"""
# Get indexes for the bins
self.zbin_edges[-1] += self.eps
self.cbin_edges[-1] += self.eps
idx_z = np.digitize(X.Z, self.zbin_edges)
idx_c = np.digitize(X.C, self.cbin_edges)
# Generate delta PDFs
return np.array(
[
signal.unit_impulse(
(self.nz, self.nc), (idx_z[i] - 1, idx_c[i] - 1)
).flatten(order="F")
for i in range(X.shape[0])
]
)
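# ========================================================================
# Illustrative usage (the bin edges and the two sample states are assumptions):
# the delta-delta baseline puts all probability mass in the (Z, C) bin that
# contains each sample.
def _example_dd_model():
    zedges = np.linspace(0.0, 1.0, 33)
    cedges = np.linspace(0.0, 0.21, 65)
    model = DD(zedges, cedges)
    X = pd.DataFrame({"Z": [0.1, 0.4], "C": [0.05, 0.15]})
    return model.predict(X)  # shape (2, 32 * 64), one-hot rows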
# ========================================================================
class BD(AnalyticalPDFModel):
"""
beta(Z) - delta(C) PDF
"""
def __init__(self, zbin_edges, cbin_edges):
super().__init__(zbin_edges, cbin_edges)
def predict(self, X):
"""
:param X: conditional variables
:type X: dataframe
:return: PDFs
:rtype: array
"""
self.cbin_edges[-1] += self.eps
idx_c = np.digitize(X.C, self.cbin_edges)
# Generate beta-delta PDFs
npdfs = X.shape[0]
pdfs = np.zeros((X.shape[0], self.nz * self.nc))
np.random.seed(self.seed)
for i in range(npdfs):
c_pdf = signal.unit_impulse(self.nc, idx_c[i] - 1)
z_pdf = betaPDF(X.Z.iloc[i], X.Zvar.iloc[i], self.zbin_centers)
pdfs[i, :] = np.outer(z_pdf, c_pdf).flatten(order="F")
return pdfs
# ========================================================================
class BB(AnalyticalPDFModel):
"""
beta(Z) - beta(C) PDF
"""
def __init__(self, zbin_edges, cbin_edges):
super().__init__(zbin_edges, cbin_edges)
def predict(self, X):
"""
:param X: conditional variables
:type X: dataframe
:return: PDFs
:rtype: array
"""
# Generate beta-delta PDFs
npdfs = X.shape[0]
pdfs = np.zeros((X.shape[0], self.nz * self.nc))
np.random.seed(self.seed)
for i in range(npdfs):
c_pdf = betaPDF(
X.C.iloc[i] / self.cscale,
X.Cvar.iloc[i] / (self.cscale ** 2),
self.cbin_centers / self.cscale,
)
z_pdf = betaPDF(X.Z.iloc[i], X.Zvar.iloc[i], self.zbin_centers)
pdfs[i, :] = np.outer(z_pdf, c_pdf).flatten(order="F")
return pdfs
# ========================================================================
# Torch Variable handler
class VariableHandler:
def __init__(self, device=torch.device("cpu"), dtype=torch.float):
self.device = device
self.dtype = dtype
def tovar(self, input):
return Variable(torch.as_tensor(input, dtype=self.dtype, device=self.device))
# ========================================================================
# Network Architecture from infoGAN (https://arxiv.org/abs/1606.03657)
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
self.input_height = 32
self.input_width = 64
self.input_dim = 1 + 4
self.output_dim = 1
self.conv = nn.Sequential(
nn.Conv2d(self.input_dim, 64, 4, 2, 1),
nn.LeakyReLU(0.2),
nn.Conv2d(64, 128, 4, 2, 1),
nn.BatchNorm2d(128),
nn.LeakyReLU(0.2),
)
self.fc = nn.Sequential(
nn.Linear(128 * (self.input_height // 4) * (self.input_width // 4), 1024),
nn.BatchNorm1d(1024),
nn.LeakyReLU(0.2),
nn.Linear(1024, self.output_dim),
nn.Sigmoid(),
)
def forward(self, input, label):
x = torch.cat([input, label], 1)
x = self.conv(x)
x = x.view(-1, 128 * (self.input_height // 4) * (self.input_width // 4))
x = self.fc(x)
return x
def load(self, fname):
"""Load pickle file containing model"""
self.load_state_dict(
torch.load(fname, map_location=lambda storage, loc: storage)
)
self.eval()
# ========================================================================
class SoftmaxImage(nn.Module):
"""Apply Softmax on an image.
Softmax2d applies on second dimension (i.e. channels), which is
not what I want. This applies along the H and W dimensions, where
(N, C, H, W) is the size of the input.
"""
def __init__(self, channels, height, width):
super(SoftmaxImage, self).__init__()
self.channels = channels
self.height = height
self.width = width
self.softmax = nn.Softmax(dim=2)
def forward(self, x):
x = x.view(-1, self.channels, self.height * self.width)
x = self.softmax(x)
x = x.view(-1, self.channels, self.height, self.width)
return x
# ========================================================================
# Network Architecture from infoGAN (https://arxiv.org/abs/1606.03657)
class Generator(nn.Module):
def __init__(self, noise_size, vh=None):
super(Generator, self).__init__()
self.input_height = 32
self.input_width = 64
self.noise_size = noise_size
self.input_dim = noise_size + 4
self.output_dim = 1
if vh is None:
self.vh = VariableHandler()
else:
self.vh = vh
self.fc = nn.Sequential(
nn.Linear(self.input_dim, 1024),
nn.BatchNorm1d(1024),
nn.ReLU(),
nn.Linear(1024, 128 * (self.input_height // 4) * (self.input_width // 4)),
nn.BatchNorm1d(128 * (self.input_height // 4) * (self.input_width // 4)),
nn.ReLU(),
)
self.deconv = nn.Sequential(
nn.ConvTranspose2d(128, 64, 4, 2, 1),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.ConvTranspose2d(64, self.output_dim, 4, 2, 1),
SoftmaxImage(1, self.input_height, self.input_width),
)
def forward(self, input, label):
x = torch.cat([input, label], 1)
x = self.fc(x)
x = x.view(-1, 128, (self.input_height // 4), (self.input_width // 4))
x = self.deconv(x)
return x
def inference(self, x):
noise = self.vh.tovar(torch.rand(x.shape[0], self.noise_size))
return self.forward(noise, x)
def predict(self, X, batch_size=64, nestim=1):
X = np.asarray(X, dtype=np.float64)
n = X.shape[0]
meval = np.zeros((n, self.input_height * self.input_width))
for batch, i in enumerate(range(0, n, batch_size)):
slc = np.s_[i : i + batch_size, :]
xsub = self.vh.tovar(X[slc])
meval[slc] = np.mean(
[
self.inference(xsub).cpu().data.numpy().reshape(xsub.shape[0], -1)
for j in range(nestim)
]
)
return meval
def load(self, fname):
"""Load pickle file containing model"""
self.load_state_dict(
torch.load(fname, map_location=lambda storage, loc: storage)
)
self.eval()
# ========================================================================
def cgan_training(Xtrain, Xdev, Ytrain, Ydev, use_gpu=False):
"""
Train using a conditional GAN
"""
if use_gpu:
device = torch.device("cuda")
else:
device = torch.device("cpu")
dtype = torch.double
vh = VariableHandler(device=device, dtype=dtype)
# Make sure inputs are numpy arrays
Xtrain = np.asarray(Xtrain, dtype=np.float64)
Ytrain = np.asarray(Ytrain, dtype=np.float64)
Xdev = np.asarray(Xdev, dtype=np.float64)
Ydev = np.asarray(Ydev, dtype=np.float64)
# Sizes
batch_size = 64
input_height = 32
input_width = 64
nsample_lbls = 16
nsample_noise = 10
noise_size = 100
nlabels = Xtrain.shape[1]
torch.manual_seed(5_465_462)
# Construct the G and D models
D = Discriminator().to(device=device, dtype=dtype)
G = Generator(noise_size, vh).to(device=device, dtype=dtype)
# The number of times entire dataset is trained
nepochs = 500
# Learning rate
lr_D = 1e-3
lr_G = 1e-3
decay_rate = 0.98
# Loss and optimizers
criterion = nn.BCELoss().to(device=device)
D_optimizer = optim.SGD(D.parameters(), lr=lr_D, momentum=0.5, nesterov=True)
G_optimizer = optim.SGD(G.parameters(), lr=lr_G, momentum=0.5, nesterov=True)
D_scheduler = optim.lr_scheduler.StepLR(D_optimizer, step_size=1, gamma=decay_rate)
G_scheduler = optim.lr_scheduler.StepLR(G_optimizer, step_size=1, gamma=decay_rate)
# Tensorboard writer
writer = SummaryWriter()
logdir = writer.file_writer.get_logdir()
model_name = "CGAN"
# Validation images, labels and noise
xdev_sub = vh.tovar(Xdev[:nsample_lbls, :])
ydev_sub = vh.tovar(Ydev[:nsample_lbls, :])
valimgs = ydev_sub.view(nsample_lbls, -1, input_height, input_width)
vallbl = xdev_sub.expand(input_height, input_width, nsample_lbls, nlabels).permute(
2, 3, 0, 1
)
grid = vutils.make_grid(valimgs, nrow=nsample_lbls, normalize=True, scale_each=True)
writer.add_image("True PDF", grid, 0)
fixed_noise = vh.tovar(
torch.rand(nsample_noise, noise_size)
.to(device=device)
.repeat(1, nsample_lbls)
.reshape(-1, noise_size)
)
fixed_labels = xdev_sub.repeat(nsample_noise, 1)
# Graphs in Tensorboard
xdummy = vh.tovar(torch.rand(1, 1, input_height, input_width))
ldummy = vh.tovar(torch.rand(1, nlabels, input_height, input_width))
writer.add_graph(D, (xdummy, ldummy), verbose=False)
writer.add_graph(G, (fixed_noise, fixed_labels), verbose=False)
# Train the model
nbatches = Xtrain.shape[0] // batch_size
D.train()
for epoch in range(nepochs):
G.train()
permutation = torch.randperm(Xtrain.shape[0])
for batch, i in enumerate(range(0, Xtrain.shape[0], batch_size)):
# Global step
step = epoch * nbatches + batch
# Take a batch
indices = permutation[i : i + batch_size]
batch_x = vh.tovar(Xtrain[indices, :])
batch_y = vh.tovar(Ytrain[indices, :])
# Reshape these for the D network
actual_batch_size = batch_x.shape[0]
labels = batch_x.expand(
input_height, input_width, actual_batch_size, nlabels
).permute(2, 3, 0, 1)
imgs = batch_y.view(actual_batch_size, -1, input_height, input_width)
noise = vh.tovar(torch.rand((actual_batch_size, noise_size)))
# Real and fake labels
real_label = vh.tovar(torch.ones(actual_batch_size, 1))
fake_label = vh.tovar(torch.zeros(actual_batch_size, 1))
# update the D network
D_optimizer.zero_grad()
D_real = D(imgs, labels)
D_real_loss = criterion(D_real, real_label)
G_ = G(noise, batch_x)
D_fake = D(G_, labels)
D_fake_loss = criterion(D_fake, fake_label)
D_loss = D_real_loss + D_fake_loss
writer.add_scalar("D_real_loss", D_real_loss.item(), step)
writer.add_scalar("D_fake_loss", D_fake_loss.item(), step)
writer.add_scalar("D_loss", D_loss.item(), step)
D_loss.backward()
D_optimizer.step()
# update G network
G_optimizer.zero_grad()
G_ = G(noise, batch_x)
D_fake = D(G_, labels)
G_loss = criterion(D_fake, real_label)
writer.add_scalar("G_loss", G_loss.item(), step)
G_loss.backward()
G_optimizer.step()
if batch % 10 == 0:
print(
"Epoch [{0:d}/{1:d}], Batch [{2:d}/{3:d}], D_loss: {4:.4e}, G_loss: {5:.4e}".format(
epoch + 1,
nepochs,
batch + 1,
nbatches,
D_loss.item(),
G_loss.item(),
)
)
# Adaptive time step
G_scheduler.step()
D_scheduler.step()
for param_group in D_optimizer.param_groups:
print("Current learning rate for discriminator:", param_group["lr"])
for param_group in G_optimizer.param_groups:
print(" for generator:", param_group["lr"])
# Visualize results in Tensorboard
G.eval()
samples = G(fixed_noise, fixed_labels)
grid = vutils.make_grid(
samples, nrow=nsample_lbls, normalize=True, scale_each=True
)
writer.add_image("Generator", grid, step)
# Save the models
torch.save(G.state_dict(), os.path.join(logdir, model_name + "_G.pkl"))
torch.save(D.state_dict(), os.path.join(logdir, model_name + "_D.pkl"))
writer.close()
# Stuff we need to do to get plots...
G.eval()
mtrain = G.predict(Xtrain)
mdev = G.predict(Xdev)
# Summarize training
summarize_training(
Ytrain, mtrain, Ydev, mdev, os.path.join(logdir, model_name + ".log")
)
return mtrain, mdev, G
# ========================================================================
# Conditional variational autoencoder
# CVAE paper: Learning Structured Output Representation using Deep Conditional Generative Models
# https://papers.nips.cc/paper/5775-learning-structured-output-representation-using-deep-conditional-generative-models
# code adapted from https://github.com/timbmg/VAE-CVAE-MNIST/blob/master/models.py
class CVAE(nn.Module):
def __init__(
self, encoder_layer_sizes, latent_size, decoder_layer_sizes, nlabels=0, vh=None
):
super(CVAE, self).__init__()
self.latent_size = latent_size
self.decoder_layer_sizes = decoder_layer_sizes
if vh is None:
self.vh = VariableHandler()
else:
self.vh = vh
self.encoder = Encoder(encoder_layer_sizes, latent_size, nlabels).to(
device=vh.device, dtype=vh.dtype
)
self.decoder = Decoder(decoder_layer_sizes, latent_size, nlabels).to(
device=vh.device, dtype=vh.dtype
)
def forward(self, x, c):
batch_size = x.size(0)
means, log_var = self.encoder(x, c)
std = torch.exp(0.5 * log_var)
eps = self.vh.tovar(torch.randn([batch_size, self.latent_size]))
z = eps * std + means
recon_x = self.decoder(z, c)
return recon_x, means, log_var, z
def inference(self, c):
z = self.vh.tovar(torch.randn(c.shape[0], self.latent_size))
recon_x = self.decoder(z, c)
return recon_x
def predict(self, X, batch_size=64, nestim=1):
X = np.asarray(X, dtype=np.float64)
n = X.shape[0]
meval = np.zeros((n, self.decoder_layer_sizes[-1]))
for batch, i in enumerate(range(0, n, batch_size)):
slc = np.s_[i : i + batch_size, :]
c = self.vh.tovar(X[slc])
meval[slc] = np.mean(
[self.inference(c).cpu().data.numpy() for j in range(nestim)], axis=0
)
return meval
def load(self, fname):
"""Load pickle file containing model"""
self.load_state_dict(
torch.load(fname, map_location=lambda storage, loc: storage)
)
self.eval()
class Encoder(nn.Module):
def __init__(self, layer_sizes, latent_size, nlabels):
super(Encoder, self).__init__()
self.MLP = nn.Sequential()
for i, (in_size, out_size) in enumerate(zip(layer_sizes[:-1], layer_sizes[1:])):
self.MLP.add_module(name="L%i" % (i), module=nn.Linear(in_size, out_size))
self.MLP.add_module(name="A%i" % (i), module=nn.ReLU())
self.linear_means = nn.Linear(layer_sizes[-1], latent_size)
self.linear_log_var = nn.Linear(layer_sizes[-1], latent_size)
def forward(self, x, c):
x = torch.cat((x, c), dim=-1)
x = self.MLP(x)
means = self.linear_means(x)
log_vars = self.linear_log_var(x)
return means, log_vars
class Decoder(nn.Module):
def __init__(self, layer_sizes, latent_size, nlabels):
super(Decoder, self).__init__()
self.MLP = nn.Sequential()
input_size = latent_size + nlabels
for i, (in_size, out_size) in enumerate(
zip([input_size] + layer_sizes[:-1], layer_sizes)
):
self.MLP.add_module(name="L%i" % (i), module=nn.Linear(in_size, out_size))
if i + 1 < len(layer_sizes):
self.MLP.add_module(name="A%i" % (i), module=nn.ReLU())
else:
self.MLP.add_module(name="softmax", module=nn.Softmax(dim=1))
def forward(self, z, c):
z = torch.cat((z, c), dim=-1)
x = self.MLP(z)
return x
def loss_fn(recon_x, x, mean, log_var):
BCE = nn.functional.binary_cross_entropy(recon_x, x, reduction="sum")
KLD = -0.5 * torch.sum(1 + log_var - mean.pow(2) - log_var.exp())
return BCE + KLD
def cvae_training(Xtrain, Xdev, Ytrain, Ydev, use_gpu=False):
"""
Train using a conditional VAE
"""
if use_gpu:
device = torch.device("cuda")
else:
device = torch.device("cpu")
vh = VariableHandler(device=device, dtype=torch.double)
# Make sure inputs are numpy arrays
Xtrain = np.asarray(Xtrain, dtype=np.float64)
Ytrain = np.asarray(Ytrain, dtype=np.float64)
Xdev = np.asarray(Xdev, dtype=np.float64)
Ydev = np.asarray(Ydev, dtype=np.float64)
# Sizes
nlabels = Xtrain.shape[1]
input_size = Ytrain.shape[1]
batch_size = 64
encoder_layer_sizes = [input_size + nlabels, 512, 256]
latent_size = 10
decoder_layer_sizes = [256, 512, input_size]
torch.manual_seed(5_465_462)
# The number of times entire dataset is trained
nepochs = 500
# Learning rate
lr = 1e-3
# CVAE model
cvae = CVAE(
encoder_layer_sizes=encoder_layer_sizes,
latent_size=latent_size,
decoder_layer_sizes=decoder_layer_sizes,
nlabels=nlabels,
vh=vh,
).to(device=device)
# Optimizer
optimizer = optim.Adam(cvae.parameters(), lr=lr)
# Tensorboard writer
writer = SummaryWriter()
logdir = writer.file_writer.get_logdir()
model_name = "CVAE"
# Graphs in Tensorboard
xdummy = vh.tovar(torch.rand(1, input_size))
ldummy = vh.tovar(torch.rand(1, nlabels))
writer.add_graph(cvae, (xdummy, ldummy), verbose=False)
# Train the model
nbatches = Xtrain.shape[0] // batch_size
start = time.time()
for epoch in range(nepochs):
cvae.train()
permutation = torch.randperm(Xtrain.shape[0])
for batch, i in enumerate(range(0, Xtrain.shape[0], batch_size)):
# Global step
step = epoch * nbatches + batch
# Take a batch
indices = permutation[i : i + batch_size]
batch_c = vh.tovar(Xtrain[indices, :])
batch_x = vh.tovar(Ytrain[indices, :])
# Forward model
recon_x, mean, log_var, z = cvae(batch_x, batch_c)
# Loss
loss = loss_fn(recon_x, batch_x, mean, log_var)
writer.add_scalar("loss", loss, step)
if batch % 10 == 0:
print(
"Epoch [{0:d}/{1:d}], Batch [{2:d}/{3:d}], Loss: {4:.4e}".format(
epoch + 1, nepochs, batch + 1, nbatches, loss
)
)
# Zero gradients, perform a backward pass, and update the weights.
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Save the models
torch.save(cvae.state_dict(), os.path.join(logdir, model_name + ".pkl"))
end = time.time() - start
writer.close()
cvae.eval()
mtrain = cvae.predict(Xtrain)
mdev = cvae.predict(Xdev)
# Summarize training
summarize_training(
Ytrain,
mtrain,
Ydev,
mdev,
fname=os.path.join(logdir, model_name + ".log"),
timing=end,
dofs=count_parameters(cvae),
)
return mtrain, mdev, cvae
# ========================================================================
# Fully connected NN
class Net(nn.Module):
def __init__(self, input_size, layer_sizes, vh=None):
super(Net, self).__init__()
if vh is None:
self.vh = VariableHandler()
else:
self.vh = vh
self.input_size = input_size
self.layer_sizes = layer_sizes
self.MLP = nn.Sequential()
for i, (in_size, out_size) in enumerate(
zip([input_size] + layer_sizes[:-1], layer_sizes)
):
if i + 1 < len(layer_sizes):
self.MLP.add_module(
name="L%i" % (i), module=nn.Linear(in_size, out_size)
)
self.MLP.add_module(name="A%i" % (i), module=nn.LeakyReLU())
self.MLP.add_module(name="B%i" % (i), module=nn.BatchNorm1d(out_size))
else:
self.MLP.add_module(
name="L%i" % (i), module=nn.Linear(in_size, out_size)
)
self.MLP.add_module(name="softmax", module=nn.Softmax(dim=1))
def forward(self, x):
return self.MLP(x)
def predict(self, X, batch_size=64):
X = np.asarray(X, dtype=np.float64)
n = X.shape[0]
meval = np.zeros((n, self.layer_sizes[-1]))
for batch, i in enumerate(range(0, n, batch_size)):
slc = np.s_[i : i + batch_size, :]
meval[slc] = self.forward(self.vh.tovar(X[slc])).cpu().data.numpy()
return meval
def load(self, fname):
"""Load pickle file containing model"""
self.load_state_dict(
torch.load(fname, map_location=lambda storage, loc: storage)
)
self.eval()
# ========================================================================
# Clip to [0,1] and normalize (because we are predicting a PDF)
class ClampNorm(nn.Module):
def __init__(self):
super(ClampNorm, self).__init__()
def forward(self, x):
out = x.clamp(0.0, 1.0)
return out / out.sum(1, keepdim=True)
# ========================================================================
# Linear regression
class LinearRegNet(nn.Module):
def __init__(self, D_in, D_out):
super(LinearRegNet, self).__init__()
self.fc1 = nn.Linear(D_in, D_out)
def forward(self, x):
out = self.fc1(x)
return out
# ========================================================================
class RelErrorLoss(nn.Module):
def __init__(self):
super(RelErrorLoss, self).__init__()
self.eps = 1e-6
def forward(self, input, target):
return torch.mean(torch.abs(target - input) / (target + self.eps))
# ========================================================================
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
# ========================================================================
def dnn_training(Xtrain, Xdev, Ytrain, Ydev, use_gpu=False):
"""
Train using a deep neural network
"""
if use_gpu:
device = torch.device("cuda")
else:
device = torch.device("cpu")
dtype = torch.double
vh = VariableHandler(device=device, dtype=dtype)
# Make sure inputs are numpy arrays
Xtrain = np.asarray(Xtrain, dtype=np.float64)
Ytrain = np.asarray(Ytrain, dtype=np.float64)
Xdev = np.asarray(Xdev, dtype=np.float64)
Ydev = np.asarray(Ydev, dtype=np.float64)
# N is batch size; D_in is input dimension; D_out is output dimension
batch_size = 64
input_size = Xtrain.shape[1]
layer_sizes = [256, 512, Ytrain.shape[1]]
torch.manual_seed(5_465_462)
# Construct the NN model
model = Net(input_size, layer_sizes, vh).to(device=device, dtype=dtype)
# The number of times entire dataset is trained
nepochs = 500
# Learning rate
learning_rate = 1e-4
# Loss and optimizer
# criterion = nn.BCELoss().to(device=device)
criterion = nn.MSELoss().to(device=device)
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
# Tensorboard output
writer = SummaryWriter()
logdir = writer.file_writer.get_logdir()
model_name = "DNN"
xdummy = vh.tovar(torch.randn(1, Xtrain.shape[1]))
writer.add_graph(model, (xdummy,), verbose=True)
# Train the model
nbatches = Xtrain.shape[0] // batch_size
start = time.time()
for epoch in range(nepochs):
model.train()
permutation = torch.randperm(Xtrain.shape[0])
for batch, i in enumerate(range(0, Xtrain.shape[0], batch_size)):
# Global step
step = epoch * nbatches + batch
# Take a batch
indices = permutation[i : i + batch_size]
batch_x = vh.tovar(Xtrain[indices, :])
batch_y = vh.tovar(Ytrain[indices, :])
# Forward pass: Compute predicted y by passing x to the model
y_pred = model(batch_x)
# Compute and log information
loss = criterion(y_pred, batch_y)
writer.add_scalar("loss", loss.item(), step)
if batch % 10 == 0:
print(
"Epoch [{0:d}/{1:d}], Batch [{2:d}/{3:d}], Loss: {4:.4e}".format(
epoch + 1, nepochs, batch + 1, nbatches, loss.item()
)
)
# # Logging to tensorboardX
# writer.add_text("Text", "text logged at step:" + str(step), step)
# for name, param in model.named_parameters():
# writer.add_histogram(name, param.clone().cpu().data.numpy(), step)
# Zero gradients, perform a backward pass, and update the weights.
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Validation loss and adaptive time step
model.eval()
val_loss = criterion(model(vh.tovar(Xdev)), vh.tovar(Ydev))
writer.add_scalar("val_loss", val_loss.item(), step)
print(
"Epoch [{0:d}/{1:d}], Validation loss: {2:.4e}".format(
epoch + 1, nepochs, val_loss.item()
)
)
for param_group in optimizer.param_groups:
print("Current learning rate", param_group["lr"])
# Save the models
torch.save(model.state_dict(), os.path.join(logdir, model_name + ".pkl"))
end = time.time() - start
writer.close()
model.eval()
mtrain = model.predict(Xtrain)
mdev = model.predict(Xdev)
# Summarize training
summarize_training(
Ytrain,
mtrain,
Ydev,
mdev,
fname=os.path.join(logdir, model_name + ".log"),
timing=end,
dofs=count_parameters(model),
)
return mtrain, mdev, model
# ========================================================================
def predict_all_dice(model, model_scaler, datadir="data", half=False):
"""
Predict on data from all dices
"""
lst = []
dices = [
"dice_0002",
"dice_0003",
"dice_0004",
"dice_0005",
"dice_0006",
"dice_0007",
"dice_0008",
"dice_0009",
"dice_0010",
]
# Normalization constant (computed on all the data)
src_pv_norm = src_pv_normalization()
for dice in dices:
print(f"Predicting model on {dice}")
# Load data
pdf = pd.read_pickle(os.path.join(datadir, f"{dice}_pdfs.gz"))
means = pd.read_pickle(os.path.join(datadir, f"{dice}_src_pv_means.gz"))
Xdev = pd.read_pickle(os.path.join(datadir, f"{dice}_xdev.gz"))
Ydev = pd.read_pickle(os.path.join(datadir, f"{dice}_ydev.gz"))
dat = np.load(os.path.join(datadir, f"{dice}.npz"))
z = dat["z"]
# Switch scaler
scaler = joblib.load(os.path.join(datadir, f"{dice}_scaler.pkl"))
Xdev = utilities.switch_scaler(Xdev, scaler, model_scaler)
if half:
idx = pdf.xc > 0
Xdev = Xdev.loc[idx.loc[Xdev.index]]
Ydev = Ydev.loc[idx.loc[Ydev.index]]
# Prediction
mdev = model.predict(Xdev)
jsd = calculate_jsd(Ydev, mdev)
jsd90 = np.percentile(jsd, [90])
# Perform convolution
conv = convolution_means(mdev, means.loc[Ydev.index])
rmse, mae, r2 = error_metrics(pdf.SRC_PV.loc[Ydev.index], conv)
lst.append(
{
"z": z,
"rmse": rmse / src_pv_norm,
"mae": mae,
"r2": r2,
"jsd90": jsd90[0],
}
)
return pd.DataFrame(lst)
# ========================================================================
def predict_full_dices(
model,
model_scaler,
dices=[
"dice_0002",
"dice_0003",
"dice_0004",
"dice_0005",
"dice_0006",
"dice_0007",
"dice_0008",
"dice_0009",
"dice_0010",
],
datadir="data",
half=False,
):
"""
Predict on all data from all dices
"""
lst = []
for dice in dices:
print(f"Predicting model on {dice}")
# Load data
pdf = pd.read_pickle(os.path.join(datadir, f"{dice}_pdfs.gz"))
means = pd.read_pickle(os.path.join(datadir, f"{dice}_src_pv_means.gz"))
X = pd.DataFrame(
model_scaler.transform(pdf[get_xnames()]),
index=pdf.index,
columns=get_xnames(),
)
# Prediction
mpred = model.predict(X)
# Perform convolution and save data
df = pd.DataFrame(
{
"xc": pdf.xc,
"yc": pdf.yc,
"zc": pdf.zc,
"exact": pdf.SRC_PV,
"model": convolution_means(mpred, means),
},
index=pdf.index,
)
df["dice"] = dice
lst.append(df)
return pd.concat(lst)
# ========================================================================
def lrp_all_dice(DNN, model_scaler, datadir="data"):
"""
Calculate DNN LRP on data from all dices
"""
lst = []
dices = [
"dice_0002",
# "dice_0003",
"dice_0004",
# "dice_0005",
"dice_0006",
# "dice_0007",
"dice_0008",
# "dice_0009",
"dice_0010",
]
fname = "lrp_hist.pdf"
with PdfPages(fname) as pdf:
plt.close("all")
plt.rc("text", usetex=True)
for d, dice in enumerate(dices):
print(f"DNN LRP on {dice}")
# Load data
Xdev = pd.read_pickle(os.path.join(datadir, f"{dice}_xdev.gz"))
dat = np.load(os.path.join(datadir, f"{dice}.npz"))
z = dat["z"]
# Switch scaler
scaler = joblib.load(os.path.join(datadir, f"{dice}_scaler.pkl"))
Xdev = utilities.switch_scaler(Xdev, scaler, model_scaler)
# LRP
lrp_values = lrp.eval_lrp(Xdev, DNN)
# Compute relevances (TODO: figure out which one to use)
relevances = np.mean(lrp_values, axis=0)
# relevances = np.mean(lrp_values, axis=0) / np.sum(np.mean(lrp_values, axis=0))
# relevances = np.mean(lrp_values / np.sum(lrp_values, axis=1)[:, None], axis=0)
# Store
lst.append(
{
"z": z,
Xdev.columns[0]: relevances[0],
Xdev.columns[1]: relevances[1],
Xdev.columns[2]: relevances[2],
Xdev.columns[3]: relevances[3],
}
)
# Histogram plot of LRP values
            nbins = 100
            bins = np.linspace(-0.2, 1.0, nbins)
            for k in range(lrp_values.shape[1]):
                hist, bins = np.histogram(lrp_values[:, k], bins=bins, density=True)
centers = utilities.edges_to_midpoint(bins)
plt.figure(k)
plt.plot(centers, hist, color=cmap[d % len(cmap)])
for k, xname in enumerate(get_xnames()):
plt.figure(k)
plt.title(xname)
plt.tight_layout()
pdf.savefig(dpi=300, bbox_inches="tight")
return pd.DataFrame(lst)
# ========================================================================
def shuffled_input_loss(model, X, Y):
"""Get the loss from shuffling each input column
This provides an estimate of the feature importance of a model by
evaluating the change in the loss metric when input columns are
shuffled one at a time.
Note that this is not a "perfect" way of computing feature
importance. There are many ways to compute feature importance and
there are many failure modes
(ftp://ftp.sas.com/pub/neural/importance.html). One major issue is
collinearity of input variables.
:param model: model
:type model: model
:param X: data for prediction
:type X: dataframe
:param Y: true values
:type Y: dataframe
:return: loss normalized by unshuffled loss
:rtype: dataframe
"""
def loss(true, predicted, metric="rmse"):
if metric == "jsd":
jsd = calculate_jsd(true, predicted)
return np.percentile(jsd, [90])[0]
elif metric == "rmse":
return rmse_metric(true, predicted)
dic = {}
metric = "rmse"
np.random.seed(985_721)
dic["original"] = loss(Y, model.predict(X), metric=metric)
for col in X:
# Shuffle a single column and compute the loss
df = X.copy()
np.random.shuffle(df[col].values)
dic[col] = loss(Y, model.predict(df), metric=metric)
return pd.DataFrame([dic])
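# A minimal usage sketch (Xdev/Ydev as used elsewhere in this module; the
# normalization step is illustrative):
#   fi = shuffled_input_loss(model, Xdev, Ydev)
#   fi = fi.div(fi["original"], axis=0)
# Columns whose normalized loss is well above 1 are the inputs the model
# relies on most; values near 1 indicate inputs it could do without.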
# ========================================================================
def prediction_times(models, X, Y):
"""
Get predictions times for different models
:param models: dictionary of models (models must contain predict function)
:type models: dict
:param X: data for prediction
:type X: array
:param Y: true values
:type Y: array
:return: prediction times
:rtype: dataframe
"""
lst = []
N = 10
for key, model in models.items():
# Estimate the prediction time
end = []
for k in range(N):
start = time.time()
mpredict = model.predict(X)
end.append(time.time() - start)
# Calculate the prediction error
jsd = calculate_jsd(Y, mpredict)
jsd90 = np.percentile(jsd, [90])
lst.append({"model": key, "time": np.mean(end) / X.shape[0], "error": jsd90[0]})
return pd.DataFrame(lst)
# ========================================================================
def summarize_training(
ytrain, mtrain, ydev, mdev, fname="summary.log", timing=0.0, dofs=0
):
"""
Summarize training
:param ytrain: true training values
:type ytrain: array
:param mtrain: predicted training values
:type mtrain: array
:param ydev: true dev values
:type ydev: array
:param mdev: predicted dev values
:type mdev: array
:param fname: log filename
:type fname: str
:param timing: training time
:type timing: float
:param dofs: number of degrees of freedom in model
:type dofs: int
"""
jsd_train = calculate_jsd(ytrain, mtrain)
jsd_dev = calculate_jsd(ydev, mdev)
std_error_train = np.std(np.ravel(ytrain - mtrain) ** 2)
std_error_dev = np.std(np.ravel(ydev - mdev) ** 2)
percentiles = [85, 90, 95]
percentiles_train = np.percentile(jsd_train, percentiles)
percentiles_dev = np.percentile(jsd_dev, percentiles)
msg = (
f"""Training data errors\n"""
f""" MAE: {mean_absolute_error(ytrain, mtrain):e}\n"""
f""" MSE: {mean_squared_error(ytrain, mtrain):e}\n"""
f""" std SE: {std_error_train:e}\n"""
f""" R^2: {r2_score(ytrain, mtrain):.2f}\n"""
f""" JSD 85 percentile: {percentiles_train[0]:5f}\n"""
f""" JSD 90 percentile: {percentiles_train[1]:5f}\n"""
f""" JSD 95 percentile: {percentiles_train[2]:5f}\n"""
f"""\n"""
f"""Dev data errors\n"""
f""" MAE: {mean_absolute_error(ydev, mdev):e}\n"""
f""" MSE: {mean_squared_error(ydev, mdev):e}\n"""
f""" std SE: {std_error_dev:e}\n"""
f""" R^2: {r2_score(ydev, mdev):.2f}\n"""
f""" JSD 85 percentile: {percentiles_dev[0]:5f}\n"""
f""" JSD 90 percentile: {percentiles_dev[1]:5f}\n"""
f""" JSD 95 percentile: {percentiles_dev[2]:5f}\n"""
f"""\n"""
f"""Training time: {timing:5f} seconds\n"""
f"""Model DoFs: {dofs:5d}\n"""
)
# Output and write to file
print(msg)
with open(fname, "w") as f:
f.write(msg)
# ========================================================================
def plot_result(ytrain, mtrain, ydev, mdev, labels, bins, fname="summary.pdf"):
"""
Plot results
:param ytrain: true training values
:type ytrain: array
:param mtrain: predicted training values
:type mtrain: array
:param ydev: true dev values
:type ydev: array
:param mdev: predicted dev values
:type mdev: array
:param labels: PDF labels, i.e. pdf.loc[Xdev.index,Xdev.columns]
:type labels: dataframe
:param bins: bins for Z and C
:type bins: dataframe
:param fname: plot filename
:type fname: str
"""
ytrain = np.asarray(ytrain, dtype=np.float64)
mtrain = np.asarray(mtrain, dtype=np.float64)
ydev = np.asarray(ydev, dtype=np.float64)
mdev = np.asarray(mdev, dtype=np.float64)
with PdfPages(fname) as pdf:
plt.close("all")
# Plot some sample PDF predictions
nc = len(np.unique(bins.Cbins))
nz = len(np.unique(bins.Zbins))
# C = np.reshape(bins.Cbins.values, (nc, nz))
# Z = np.reshape(bins.Zbins.values, (nc, nz))
zbin_edges = utilities.midpoint_to_edges(bins.Zbins)
cbin_edges = utilities.midpoint_to_edges(bins.Cbins)
extent = [
zbin_edges.min(),
zbin_edges.max(),
zbin_edges.min(),
cbin_edges.max(),
]
n = 10
# # Random sort
# np.random.seed(42)
# indices = np.random.randint(low=0, high=ydev.shape[0], size=n)
# Sort by decreasing JSD
jsd_dev = calculate_jsd(ydev, mdev)
indices = np.argsort(-jsd_dev)[:n]
plt.figure(0, figsize=(24, 32))
plt.clf()
for i, idx in enumerate(indices):
ax = plt.subplot(n, 3, 1 + i * 3)
im = plt.imshow(
np.reshape(ydev[idx, :], (nc, nz)),
origin="lower",
extent=extent,
aspect="auto",
)
plt.colorbar(im)
plt.xlabel("Mixture Fraction")
plt.ylabel("Progress Variable")
label = (
f"""index:{labels.iloc[idx].name}\n"""
f"""c ={labels.iloc[idx].C:.6f}\n"""
f"""c''={labels.iloc[idx].Cvar:.6f}\n"""
f"""Z ={labels.iloc[idx].Z:.6f}\n"""
f"""Z''={labels.iloc[idx].Zvar:.6f}"""
)
plt.title(f"True Joint PDF")
style = dict(size=10, color="white", ha="left", va="top")
ax.text(0.02, 0.2, label, **style)
plt.subplot(n, 3, 2 + i * 3)
im = plt.imshow(
np.reshape(mdev[idx, :], (nc, nz)),
origin="lower",
extent=extent,
aspect="auto",
)
plt.colorbar(im)
plt.xlabel("Mixture Fraction")
plt.ylabel("Progress Variable")
plt.title("Predicted Joint PDF")
plt.subplot(n, 3, 3 + i * 3)
err_dev = mdev - ydev
im = plt.imshow(
np.reshape(err_dev[idx, :], (nc, nz)),
origin="lower",
extent=extent,
aspect="auto",
)
plt.colorbar(im)
plt.xlabel("Mixture Fraction")
plt.ylabel("Progress Variable")
plt.title(
"Error in PDF (JSD = {0:f})".format(
jensen_shannon_divergence(ydev[idx, :], mdev[idx, :])
)
)
plt.tight_layout()
pdf.savefig(dpi=300, bbox_inches="tight")
# PDF of the JSD
jsd_train = calculate_jsd(ytrain, mtrain)
jsd_dev = jsd_dev[np.isfinite(jsd_dev)]
jsd_train = jsd_train[np.isfinite(jsd_train)]
bins = np.linspace(0, 0.5, 100)
hist_train, _ = np.histogram(jsd_train, bins=bins, density=True)
hist_dev, _ = np.histogram(jsd_dev, bins=bins, density=True)
cum_train = np.cumsum(hist_train) * np.diff(bins)
cum_dev = np.cumsum(hist_dev) * np.diff(bins)
centers = utilities.edges_to_midpoint(bins)
plt.figure(1)
plt.clf()
plt.plot(centers, hist_train, label="Train")
plt.plot(centers, hist_dev, label="Dev")
plt.legend()
plt.title(f"PDF of JSD")
plt.xlabel("")
plt.tight_layout()
pdf.savefig(dpi=300, bbox_inches="tight")
plt.figure(2)
plt.clf()
plt.plot(centers, cum_train, label="Train")
plt.plot(centers, cum_dev, label="Dev")
plt.legend()
plt.title(f"Cumulative sum of JSD PDF")
plt.xlabel("")
plt.tight_layout()
pdf.savefig(dpi=300, bbox_inches="tight")
# # PDF of the relative errors DOESNT make sense for PDF prediction
# nkde = min(1000, ydev.shape[0])
# kde_train = gaussian_kde(1 - np.ravel(np.ravel(mtrain[:nkde] / ytrain[:nkde])))
# kde_dev = gaussian_kde(1 - np.ravel(np.ravel(mdev[:nkde] / ydev[:nkde])))
# pdf_space = np.linspace(-2e-2, 2e-2, 500)
# plt.figure(1)
# plt.clf()
# plt.semilogy(pdf_space, kde_train(pdf_space), label="Train")
# plt.semilogy(pdf_space, kde_dev(pdf_space), label="Dev")
# plt.legend()
# plt.title(f"{label}: PDF of relative errors")
# plt.xlabel("")
# plt.ylim([1e-5, 1e4])
# plt.tight_layout()
# pdf.savefig(dpi=300, bbox_inches="tight")
# ========================================================================
def plot_scatter(true, predicted, fname="scatter.pdf"):
"""
Make a generic scatter plot of true vs predicted
"""
eps = 1e-13
error = predicted / (true + eps) - 1
rmse = rmse_metric(true, predicted)
lower, upper = np.percentile(error, [5, 95])
bins = np.linspace(-0.5, 0.5, 100)
hist, _ = np.histogram(error, bins=bins, density=True)
centers = utilities.edges_to_midpoint(bins)
with PdfPages(fname) as pdf:
plt.close("all")
plt.figure(0)
plt.scatter(true, predicted, alpha=0.05)
plt.plot([0, np.max(true)], [0, np.max(true)], "-k")
plt.xlabel("True")
plt.ylabel("Predicted")
plt.title(f"RMSE: {rmse:.3e}")
plt.axis("equal")
plt.tight_layout()
pdf.savefig(dpi=300, bbox_inches="tight")
plt.figure(1)
plt.plot(centers, hist)
plt.title(f"""PDF of relative errors (90% in [{lower:.3f}, {upper:.3f}])""")
plt.tight_layout()
pdf.savefig(dpi=300, bbox_inches="tight")
# ========================================================================
def plot_input_space(pdfs, fname="inputs.pdf"):
"""
Make plots of the PDF input space
"""
# Setup
x = pdfs.Z
y = pdfs.C
with PdfPages(fname) as pdf:
plt.close("all")
plt.rc("text", usetex=True)
# definitions for the axes
pad = 0.02
left, width = 0.12, 0.65
bottom, height = 0.1, 0.65
bottom_h = left + width + pad
hist_height = 0.2
# First figure
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, hist_height]
plt.figure(0, figsize=(10, 8))
axs0 = plt.axes(rect_scatter)
axhx = plt.axes(rect_histx)
# Scatter plot
im = axs0.scatter(x, y, c=pdfs.Zvar, marker=".", alpha=0.1, cmap="viridis", rasterized=True)
cbar = plt.colorbar(im, ax=axs0)
cbar.ax.set_title(r"$\widetilde{Z''}$", fontsize=28)
cbar.set_alpha(1.0)
cbar.ax.tick_params(labelsize=22)
cbar.draw_all()
# Histogram on top (resize because of colorbar)
axhx.hist(x, bins=50, density=True)
axhx.tick_params(
axis="both", which="both", bottom=False, top=False, labelbottom=False
)
axhx.set_ylabel(r"$P(\widetilde{Z})$", fontsize=28)
plt.setp(axhx.get_ymajorticklabels(), fontsize=22)
pos_s0 = axs0.get_position()
pos_hx = axhx.get_position()
axhx.set_position([pos_s0.x0, pos_hx.y0, pos_s0.width, pos_hx.height])
axs0.set_xlabel(r"$\widetilde{Z}$", fontsize=28)
axs0.set_ylabel(r"$\widetilde{c}$", fontsize=28)
plt.setp(axs0.get_xmajorticklabels(), fontsize=22)
plt.setp(axs0.get_ymajorticklabels(), fontsize=22)
pdf.savefig(dpi=150)
# Second figure
rect_histy = [left + pos_s0.width + pad, bottom, hist_height, height]
plt.figure(1, figsize=(10, 8))
axs1 = plt.axes(rect_scatter)
axhy = plt.axes(rect_histy)
# Scatter plot
im = axs1.scatter(x, y, c=pdfs.Cvar, marker=".", alpha=0.1, cmap="viridis", rasterized=True)
axs1.tick_params(axis="both", which="both", left=False, labelleft=False)
pos_s1 = axs1.get_position()
axs1.set_position([pos_s0.x0, pos_s1.y0, pos_s0.width, pos_s1.height])
# Histogram to the right
axhy.hist(y, bins=50, density=True, orientation="horizontal")
axhy.tick_params(
axis="both",
which="both",
bottom=True,
top=False,
left=False,
right=False,
labelbottom=True,
labelleft=False,
)
axhy.set_xlabel(r"$P(\widetilde{c})$", fontsize=28)
plt.setp(axhy.get_xmajorticklabels(), fontsize=22)
pos_hy = axhy.get_position()
axhy.set_position([pos_hy.x0, pos_hy.y0, pos_hx.height, pos_hy.height])
# Then colorbar
cbar = plt.colorbar(im, ax=axhy)
cbar.ax.set_title(r"$\widetilde{c''}$", fontsize=28)
cbar.set_alpha(1.0)
cbar.ax.tick_params(labelsize=22)
cbar.draw_all()
axs1.set_xlabel(r"$\widetilde{Z}$", fontsize=28)
plt.setp(axs1.get_xmajorticklabels(), fontsize=22)
plt.setp(axs1.get_ymajorticklabels(), fontsize=22)
pdf.savefig(dpi=150)
# Third figure
rect_scatter = [left, bottom, width, height]
plt.figure(2, figsize=(10, 8))
axs0 = plt.axes(rect_scatter)
# Scatter plot
im = axs0.scatter(x, y, c=pdfs.SRC_PV, marker=".", alpha=0.1, cmap="viridis", rasterized=True)
cbar = plt.colorbar(im, ax=axs0)
cbar.ax.set_title(r"$\widetilde{\dot{\omega}}$", fontsize=28)
cbar.set_alpha(1.0)
cbar.ax.tick_params(labelsize=22)
cbar.draw_all()
axs0.set_xlabel(r"$\widetilde{Z}$", fontsize=28)
axs0.set_ylabel(r"$\widetilde{c}$", fontsize=28)
plt.setp(axs0.get_xmajorticklabels(), fontsize=22)
plt.setp(axs0.get_ymajorticklabels(), fontsize=22)
pdf.savefig(dpi=150)
# ========================================================================
def set_aspect_display_coord(ratio=0.5, ax=None):
"""Set the aspect ratio based on the figure display coordinates"""
if ax is None:
ax = plt.gca()
xleft, xright = ax.get_xlim()
ybottom, ytop = ax.get_ylim()
ax.set_aspect(abs((xright - xleft) / (ybottom - ytop)) * ratio)
# ========================================================================
def plot_pdfs(
pdfs, means, bins, fname="pdfs.pdf", label=False, models=None, legend=False
):
"""
Make plots of the PDFs
"""
nc = len(np.unique(bins.Cbins))
nz = len(np.unique(bins.Zbins))
zbin_centers = np.unique(bins.Zbins)
cbin_centers = np.unique(bins.Cbins)
zbin_edges = utilities.midpoint_to_edges(zbin_centers)
cbin_edges = utilities.midpoint_to_edges(cbin_centers)
extent = [zbin_edges.min(), zbin_edges.max(), zbin_edges.min(), cbin_edges.max()]
y_vars = get_ynames(pdfs)
nlines = 4
nskip = len(cbin_centers) // nlines
with PdfPages(fname) as pdf:
plt.rc("text", usetex=True)
plt.close("all")
fig0 = plt.figure(0)
fig1 = plt.figure(1)
fig2 = plt.figure(2)
ax2 = fig2.add_subplot(111)
fig3 = plt.figure(3)
ax3 = fig3.add_subplot(111)
fig4 = plt.figure(4)
ax4 = fig4.add_subplot(111)
fig5 = plt.figure(5)
ax5 = fig5.add_subplot(111)
for i, idx in enumerate(pdfs.index):
pdfi = np.reshape(pdfs.loc[idx, y_vars].values, (nc, nz))
meansi = np.reshape(means.loc[idx, y_vars].values, (nc, nz))
# Plot PDF
plt.figure(0)
plt.clf()
ax0 = fig0.add_subplot(111)
im = ax0.imshow(
pdfi,
origin="lower",
extent=extent,
aspect="auto",
interpolation="lanczos",
cmap="magma",
)
if label:
labels = (
f"""$\widetilde{{Z}} ={pdfs.loc[idx].Z:.4f}$\n"""
f"""$\widetilde{{Z''}}={pdfs.loc[idx].Zvar:.4f}$\n"""
f"""$\widetilde{{c}} ={pdfs.loc[idx].C:.4f}$\n"""
f"""$\widetilde{{c''}}={pdfs.loc[idx].Cvar:.4f}$\n"""
f"""$\widetilde{{\dot{{\omega}}}}={pdfs.loc[idx].SRC_PV:.4f}$"""
)
print(labels)
style = dict(size=10, color="white", ha="left", va="top")
ax0.text(0.02, 0.2, labels, **style)
cbar = plt.colorbar(im)
cbar.ax.set_title(r"$P(Z=Z^*,c = c^*)$")
ax0.set_yticks([0, 0.05, 0.1, 0.15, 0.2])
ax0.set_xlabel(r"$Z^*$", fontsize=22, fontweight="bold")
ax0.set_ylabel(r"$c^*$", fontsize=22, fontweight="bold")
plt.setp(ax0.get_xmajorticklabels(), fontsize=18, fontweight="bold")
plt.setp(ax0.get_ymajorticklabels(), fontsize=18, fontweight="bold")
fig0.subplots_adjust(bottom=0.14)
fig0.subplots_adjust(left=0.17)
pdf.savefig(dpi=300)
# Plot means
plt.figure(1)
plt.clf()
ax1 = fig1.add_subplot(111)
im = ax1.imshow(
meansi,
origin="lower",
extent=extent,
aspect="auto",
interpolation="lanczos",
cmap="magma",
)
cbar = plt.colorbar(im)
cbar.ax.set_title(r"$\dot{\omega}(Z=Z^*,c = c^*)$")
ax1.set_yticks([0, 0.05, 0.1, 0.15, 0.2])
ax1.set_xlabel(r"$Z^*$", fontsize=22, fontweight="bold")
ax1.set_ylabel(r"$c^*$", fontsize=22, fontweight="bold")
plt.setp(ax1.get_xmajorticklabels(), fontsize=18, fontweight="bold")
plt.setp(ax1.get_ymajorticklabels(), fontsize=18, fontweight="bold")
fig1.subplots_adjust(bottom=0.14)
fig1.subplots_adjust(left=0.17)
pdf.savefig(dpi=300)
# plot marginal distributions
plt.figure(2)
p = ax2.plot(
zbin_centers,
np.sum(pdfi, axis=0),
lw=2,
color=cmap[i],
label=f"""$\widetilde{{\dot{{\omega}}}}={pdfs.loc[idx].SRC_PV:.2f}$""",
)
p[0].set_dashes(dashseq[i])
if models is not None:
p[0].set_color(cmap[-1])
p[0].set_dashes(dashseq[0])
p[0].set_zorder(10)
for m, model in enumerate(models):
p = ax2.plot(
zbin_centers,
np.sum(np.reshape(models[model][i, :], (nc, nz)), axis=0),
lw=2,
color=cmap[m],
)
p[0].set_dashes(dashseq[m])
plt.figure(3)
p = ax3.plot(cbin_centers, np.sum(pdfi, axis=1), lw=2, color=cmap[i])
p[0].set_dashes(dashseq[i])
if models is not None:
p[0].set_color(cmap[-1])
p[0].set_dashes(dashseq[0])
p[0].set_zorder(10)
for m, model in enumerate(models):
p = ax3.plot(
cbin_centers,
np.sum(np.reshape(models[model][i, :], (nc, nz)), axis=1),
lw=2,
color=cmap[m],
)
p[0].set_dashes(dashseq[m])
plt.figure(4)
p = ax4.plot(zbin_centers, np.sum(meansi, axis=0), lw=2, color=cmap[i])
p[0].set_dashes(dashseq[i])
plt.figure(5)
p = ax5.plot(cbin_centers, np.sum(meansi, axis=1), lw=2, color=cmap[i])
p[0].set_dashes(dashseq[i])
plt.figure(2)
if legend:
lgd = ax2.legend()
ax2.set_xlabel(r"$Z^*$", fontsize=22, fontweight="bold")
ax2.set_ylabel(r"$P(Z=Z^*)$", fontsize=22, fontweight="bold")
plt.setp(ax2.get_xmajorticklabels(), fontsize=18, fontweight="bold")
plt.setp(ax2.get_ymajorticklabels(), fontsize=18, fontweight="bold")
fig2.subplots_adjust(bottom=0.14)
fig2.subplots_adjust(left=0.17)
pdf.savefig(dpi=300)
plt.figure(3)
ax3.set_xticks([0, 0.05, 0.1, 0.15, 0.2])
ax3.set_xlabel(r"$c^*$", fontsize=22, fontweight="bold")
ax3.set_ylabel(r"$P(c=c^*)$", fontsize=22, fontweight="bold")
plt.setp(ax3.get_xmajorticklabels(), fontsize=18, fontweight="bold")
plt.setp(ax3.get_ymajorticklabels(), fontsize=18, fontweight="bold")
fig3.subplots_adjust(bottom=0.14)
fig3.subplots_adjust(left=0.17)
pdf.savefig(dpi=300)
plt.figure(4)
# ax4.set_yscale('symlog', linthreshy=1e-3)
ax4.set_yscale("log")
ax4.set_ylim([1e-1, 1e4])
ax4.set_xlabel(r"$Z^*$", fontsize=22, fontweight="bold")
ax4.set_ylabel(
r"$\langle \dot{\omega}|Z=Z^* \rangle$", fontsize=22, fontweight="bold"
)
plt.setp(ax4.get_xmajorticklabels(), fontsize=18, fontweight="bold")
plt.setp(ax4.get_ymajorticklabels(), fontsize=18, fontweight="bold")
fig4.subplots_adjust(bottom=0.14)
fig4.subplots_adjust(left=0.17)
pdf.savefig(dpi=300)
plt.figure(5)
ax5.set_yscale("log")
ax5.set_ylim([1e-1, 1e4])
ax5.set_xticks([0, 0.05, 0.1, 0.15, 0.2])
ax5.set_xlabel(r"$c^*$", fontsize=22, fontweight="bold")
ax5.set_ylabel(
r"$\langle \dot{\omega}|c=c^* \rangle$", fontsize=22, fontweight="bold"
)
plt.setp(ax5.get_xmajorticklabels(), fontsize=18, fontweight="bold")
plt.setp(ax5.get_ymajorticklabels(), fontsize=18, fontweight="bold")
fig5.subplots_adjust(bottom=0.14)
fig5.subplots_adjust(left=0.17)
pdf.savefig(dpi=300)
# ========================================================================
def plot_dice_slices(fname):
"""
Make plots of slices in the dice
:param fname: dice file name
:type fname: str
"""
# Load dice file
dat = np.load(fname)
# Variables
low = dat["low"]
high = dat["high"]
extent = [low[0], high[0], low[1], high[1]]
dx = dat["dx"]
z = dat["z"]
rho = dat["Rho"]
# index of slice
slc = np.s_[:, :, rho.shape[2] // 2]
# Get slices
rho = rho[slc].T
Z = np.clip(dat["Z"][slc].T, 0.0, 1.0)
C = np.clip(dat["C"][slc].T, 0.0, None)
SRC_PV = np.clip(dat["SRC_PV"][slc].T, 0.0, None)
rhoZ = rho * Z
rhoC = rho * C
rhoSRC_PV = rho * SRC_PV
# Filter
width = 32
rhof = ndimage.uniform_filter(rho, size=width)
Zf = ndimage.uniform_filter(rhoZ, size=width) / rhof
Zvarf = ndimage.uniform_filter(rho * (Z - Zf) ** 2, size=width) / rhof
Cf = ndimage.uniform_filter(rhoC, size=width) / rhof
Cvarf = ndimage.uniform_filter(rho * (C - Cf) ** 2, size=width) / rhof
SRC_PVf = ndimage.uniform_filter(rhoSRC_PV, size=width) / rhof
figname = os.path.splitext(fname)[0] + "_slice.pdf"
with PdfPages(figname) as pdf:
plt.close("all")
plt.rc("text", usetex=True)
fields = [
{"field": Zf, "label": "$\\widetilde{Z}$"},
{"field": Zvarf, "label": "$\\widetilde{Z''}$"},
{"field": Cf, "label": "$\\widetilde{c}$"},
{"field": Cvarf, "label": "$\\widetilde{c''}$"},
{"field": SRC_PVf, "label": "$\\widetilde{\\dot{\\omega}}$"},
]
for i, field in enumerate(fields):
fig, (ax0) = plt.subplots(1)
im0 = ax0.imshow(
field["field"], origin="lower", extent=extent, aspect="equal", vmin=0.0
)
cbar = plt.colorbar(im0, ax=ax0)
cbar.ax.set_title(f"""{field["label"]}""", fontsize=22)
cbar.ax.tick_params(labelsize=18)
ax0.set_xlabel(r"$x~[\mathrm{m}]$", fontsize=22, fontweight="bold")
ax0.set_ylabel(r"$y~[\mathrm{m}]$", fontsize=22, fontweight="bold")
ticks = [-0.06, 0, 0.06]
ax0.set_xticks(ticks)
ax0.set_yticks(ticks)
plt.setp(ax0.get_xmajorticklabels(), fontsize=18, fontweight="bold")
plt.setp(ax0.get_ymajorticklabels(), fontsize=18, fontweight="bold")
plt.gcf().subplots_adjust(bottom=0.15)
plt.gcf().subplots_adjust(left=0.17)
pdf.savefig(dpi=300)
# ========================================================================
def plot_jsd(jsd, legend=False):
"""
Make plots of JSD for different models
:param jsd: JSD for different models
:type jsd: dataframe
:param legend: Draw legend on plots
:type legend: bool
"""
fname = "jsd.pdf"
pdf_space = np.linspace(0, 0.7, 500)
bins = np.linspace(0, 0.7, 100)
with PdfPages(fname) as pdf:
plt.close("all")
plt.rc("text", usetex=True)
fig0 = plt.figure(0)
ax0 = fig0.add_subplot(111)
fig1 = plt.figure(1)
ax1 = fig1.add_subplot(111)
lst = []
for k, model in enumerate(jsd):
kde = gaussian_kde(jsd[model])
pkde = kde(pdf_space)
lst.append({"model": model, "jsd90": np.percentile(jsd[model], [90])[0]})
hist, _ = np.histogram(jsd[model], bins=bins, density=True)
centers = utilities.edges_to_midpoint(bins)
cum_hist = np.cumsum(hist) * np.diff(bins)
plt.figure(0)
p = ax0.plot(pdf_space, pkde, lw=2, color=cmap[k], label=model)
p[0].set_dashes(dashseq[k])
plt.figure(1)
p = ax1.plot(centers, cum_hist, lw=2, color=cmap[k], label=model)
p[0].set_dashes(dashseq[k])
df = pd.DataFrame(lst)
print(df.to_latex())
# Format figures
plt.figure(0)
if legend:
lgd = ax0.legend()
ax0.set_xlabel(r"$J$", fontsize=22, fontweight="bold")
ax0.set_ylabel(r"$P(J)$", fontsize=22, fontweight="bold")
plt.setp(ax0.get_xmajorticklabels(), fontsize=18, fontweight="bold")
plt.setp(ax0.get_ymajorticklabels(), fontsize=18, fontweight="bold")
fig0.subplots_adjust(bottom=0.15)
fig0.subplots_adjust(left=0.17)
pdf.savefig(dpi=300)
plt.figure(1)
ax1.set_xlabel(r"$J$", fontsize=22, fontweight="bold")
ax1.set_ylabel(r"$CDF(J)$", fontsize=22, fontweight="bold")
plt.setp(ax1.get_xmajorticklabels(), fontsize=18, fontweight="bold")
plt.setp(ax1.get_ymajorticklabels(), fontsize=18, fontweight="bold")
fig1.subplots_adjust(bottom=0.15)
fig1.subplots_adjust(left=0.17)
pdf.savefig(dpi=300)
# ========================================================================
def plot_dice_predictions(predictions, legend=False):
"""
Make plots for predictions from all dice
    :param predictions: prediction results for each model
    :type predictions: dict
    :param legend: Draw legend on plots
:type legend: bool
"""
fname = "dice_predictions.pdf"
with PdfPages(fname) as pdf:
plt.close("all")
plt.rc("text", usetex=True)
fig0 = plt.figure(0)
ax0 = fig0.add_subplot(111)
fig1 = plt.figure(1)
ax1 = fig1.add_subplot(111)
for k, model in enumerate(predictions):
plt.figure(0)
p = ax0.plot(
predictions[model].z,
predictions[model].jsd90,
lw=2,
color=cmap[k],
label=model,
marker=markertype[k],
ms=10,
)
p[0].set_dashes(dashseq[k])
plt.figure(1)
p = ax1.plot(
predictions[model].z,
predictions[model].rmse,
lw=2,
color=cmap[k],
label=model,
marker=markertype[k],
ms=10,
)
p[0].set_dashes(dashseq[k])
# Format figures
plt.figure(0)
if legend:
lgd = ax0.legend()
ax0.set_xlabel(r"$z~[\mathrm{m}]$", fontsize=22, fontweight="bold")
ax0.set_ylabel(r"$J_{90}$", fontsize=22, fontweight="bold")
ax0.set_ylim([0, 0.75])
plt.setp(ax0.get_xmajorticklabels(), fontsize=18, fontweight="bold")
plt.setp(ax0.get_ymajorticklabels(), fontsize=18, fontweight="bold")
fig0.subplots_adjust(bottom=0.15)
fig0.subplots_adjust(left=0.17)
pdf.savefig(dpi=300)
plt.figure(1)
ax1.set_xlabel(r"$z~[\mathrm{m}]$", fontsize=22, fontweight="bold")
ax1.set_ylabel(
r"RMSE$(\widetilde{\dot{\omega}})$", fontsize=22, fontweight="bold"
)
plt.setp(ax1.get_xmajorticklabels(), fontsize=18, fontweight="bold")
plt.setp(ax1.get_ymajorticklabels(), fontsize=18, fontweight="bold")
fig1.subplots_adjust(bottom=0.15)
fig1.subplots_adjust(left=0.17)
pdf.savefig(dpi=300)
# ========================================================================
def plot_convolution(true, convolutions, bins, legend=False):
"""
Make plots of convolution for different models
:param true: dataframe containing true SRC_PV values
:type true: dataframe
    :param convolutions: convolutions for different models
    :type convolutions: dataframe
    :param bins: bins for Z and C
    :type bins: dataframe
:param legend: Draw legend on plots
:type legend: bool
"""
src_pv = np.asarray(true.SRC_PV, dtype=np.float64)
zbin_centers = np.unique(bins.Zbins)
    cbin_centers = np.unique(bins.Cbins)
import numpy as np
def mean_std(data):
mean = sum(data) / len(data)
std = np.std(data)
return mean, std
def min_max(data):
min_value = min(data)
max_value = max(data)
return min_value, max_value
def show_stats_number(data):
mean, std = mean_std(data)
print("Mean: {}".format(mean))
print("Standard Deviation: {}".format(std))
min_value, max_value = min_max(data)
print("Min: {}".format(min_value))
print("Max: {}".format(max_value))
def show_stats(df, label):
data = df[label]
dtype = str(data.dtype)
if dtype.find("int") >= 0 or dtype.find("float") >= 0:
show_stats_number(data)
else:
pass
# show_stats_categories(data)
def reject_outliers(data, m=2):
'''
:param data: numpy array
:param m: std multiplier
:return: data with no outliers based on std
'''
return data[abs(data - np.mean(data)) < m * np.std(data)]
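# For example (illustrative data), reject_outliers(np.array([1.0] * 9 + [100.0]))
# keeps the nine 1.0 values and drops 100.0, which lies more than two standard
# deviations from the sample mean.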
def reject_outliers_percentile(data,index=False):
'''
:param data: numpy array
:return: data with no outliers, values are between 25% and 75% percentiles
'''
len_data=len(data)
data=np.sort(data)
return data[int(len_data*0.25):int(len_data*0.75)]
    p25 = np.percentile(data, 25)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import os
import json
import pandas as pd
import re
import ir_thermography.thermometry as irt
import matplotlib.ticker as ticker
from scipy import interpolate
from scipy.signal import savgol_filter
from scipy import interpolate
import platform
base_path = r'C:\Users\erick\OneDrive\Documents\ucsd\Postdoc\research\data\firing_tests\heat_flux_calibration\IR Thermography Calibration'
# data_file = 'LT_GR008G_4mTorr-contact-shield_080PCT_60GAIN 2022-05-06_1'
data_file = 'LT_GR008G_6mTorr-contact-shield_100PCT_50GAIN 2022-05-04_1'
time_constant_1, time_constant_2 = 2.1148, 2.1148
emissivity = 1.0 - (36.9 / 100)
tc_min = 0.0
def correct_thermocouple_response(measured_temperature, measured_time, time_constant, debug=False):
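    """Compensate for the first-order lag of a thermocouple.
    A first-order sensor with time constant tau satisfies
    T_gas ~ T_measured + tau * dT_measured/dt, so the measured trace is
    smoothed (Savitzky-Golay), differentiated, corrected, and smoothed
    again to limit noise amplification.
    :param measured_temperature: temperature readings (deg C)
    :param measured_time: sample times (s)
    :param time_constant: thermocouple time constant tau (s)
    :param debug: print the smoothed trace and its derivative
    :return: lag-corrected temperature estimate
    """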
n = len(measured_time)
k = int(n / 10)
k = k + 1 if k % 2 == 0 else k
T = savgol_filter(measured_temperature, k, 2)
dTdt = np.gradient(T, measured_time, edge_order=2)
if debug:
for ti, Ti, Ts, dti in zip(measured_time, measured_temperature, T, dTdt):
print(f'{ti:6.3f} s, T = {Ti:6.2f} °C, T_smooth: {Ts:6.3f} dT/dt = {dti:>5.3E}')
# dTdt = savgol_filter(dTdt, k, 3)
r = T + time_constant * dTdt
r = savgol_filter(r, k - 4, 3)
return r
def get_experiment_params(relative_path: str, filename: str):
# Read the experiment parameters
results_csv = os.path.join(relative_path, f'{filename}.csv')
count = 0
params = {}
with open(results_csv) as f:
for line in f:
if line.startswith('#'):
if count > 1:
l = line.strip()
print(l)
if l == '#Data:':
break
pattern1 = re.compile("\s+(.*?):\s(.*?)\s(.*?)$")
pattern2 = re.compile("\s+(.*?):\s(.*?)$")
matches1 = pattern1.findall(l)
matches2 = pattern2.findall(l)
if len(matches1) > 0:
params[matches1[0][0]] = {
'value': matches1[0][1],
'units': matches1[0][2]
}
elif len(matches2) > 0:
params[matches2[0][0]] = {
'value': matches2[0][1],
'units': ''
}
count += 1
return params
if __name__ == '__main__':
if platform.system() == 'Windows':
base_path = r'\\?\\' + base_path
results_path = os.path.join(base_path, 'temperature_data')
data_filetag = data_file
print('results_path: ', results_path)
main_csv = os.path.join(base_path, data_filetag + '.csv')
if not os.path.exists(results_path):
os.makedirs(results_path)
measurements_df = pd.read_csv(main_csv, comment='#').apply(pd.to_numeric)
experiment_params = get_experiment_params(relative_path=base_path, filename=data_file)
photodiode_gain = experiment_params['Photodiode Gain']['value']
laser_power_setting = experiment_params['Laser Power Setpoint']['value']
sample_name = experiment_params['Sample Name']['value']
output_path = os.path.join(results_path, f'{sample_name.upper()}_{laser_power_setting}')
if not os.path.exists(output_path):
os.makedirs(output_path)
with open('plot_style.json', 'r') as file:
json_file = json.load(file)
plot_style = json_file['defaultPlotStyle']
mpl.rcParams.update(plot_style)
thermometry = irt.PDThermometer()
# print(measurements_df)
measurement_time = measurements_df['Measurement Time (s)'].values
trigger_voltage = measurements_df['Trigger (V)'].values
photodiode_voltage = measurements_df['Photodiode Voltage (V)'].values
# for i, p in enumerate(photodiode_voltage):
# if np.isnan(p):
# print(i, measurement_time[i], p)
tc_csv = os.path.join(base_path, data_filetag + '_tcdata.csv')
tc_df = pd.read_csv(tc_csv, comment='#').apply(pd.to_numeric)
tc_time = tc_df['Time (s)'].values
temperature_a = tc_df['TC1 (C)'].values
# temperature_b = tc_df['TC2 (C)'].values
ta_corrected = correct_thermocouple_response(
measured_temperature=temperature_a, measured_time=tc_time, time_constant=time_constant_1
)
t_max_idx = measurement_time <= 3.0
tc_max_idx = tc_time <= np.inf
measurement_time = measurement_time[t_max_idx]
trigger_voltage = trigger_voltage[t_max_idx]
photodiode_voltage = photodiode_voltage[t_max_idx]
tc_time = tc_time[tc_max_idx]
temperature_a = temperature_a[tc_max_idx]
ta_corrected = ta_corrected[tc_max_idx]
trigger_voltage = savgol_filter(trigger_voltage, 5, 4)
irradiation_time_idx = trigger_voltage > 1.5
irradiation_time = measurement_time[irradiation_time_idx]
reflection_signal = np.zeros_like(photodiode_voltage)
sg_window = 9
photodiode_voltage[irradiation_time_idx] = savgol_filter(photodiode_voltage[irradiation_time_idx], sg_window, 2)
t_pulse_max = irradiation_time.max() + 0.2
noise_level = np.abs(photodiode_voltage[measurement_time > t_pulse_max]).max()
print(f"Noise Level: {noise_level:.4f} V")
t0 = irradiation_time.min()
# t0_idx = (np.abs(measurement_time - t0)).argmin() - 1
# t0 = measurement_time[t0_idx]
irradiation_time -= t0
measurement_time -= t0
n = 3
    reflective_signal_zero_idx = (np.abs(measurement_time)
#! /usr/bin/env python
"""Extract and plot channel long profiles.
Plotting functions to extract and plot channel long profiles.
Call all three functions in sequence from the main code.
The functions will return the long profile nodes, return distances upstream of
those nodes, and plot the long profiles, respectively. The former two are, by
necessity, ragged lists of arrays - the number of channels, then the nodes in
that channel.
This module selects channels by taking the largest possible drainages crossing
the grid boundaries. You can specify how many different channels it handles
using the number_of_channels parameter in the channel_nodes function (default
is 1). This may lead to strange outputs if the drainage structure of the output
changes mid-run (e.g., channel piracy). This may be modified in the future.
"""
# DEJH, March 2014.
from six.moves import range
import numpy
try:
import matplotlib.pyplot as plt
except ImportError:
import warnings
warnings.warn('matplotlib not found', ImportWarning)
def channel_nodes(grid, steepest_nodes, drainage_area, flow_receiver, number_of_channels=1, threshold=None):
    if threshold is None:
threshold = 2. * numpy.amin(grid.area_of_cell)
boundary_nodes = grid.boundary_nodes
#top_two_pc = len(boundary_nodes)//50
#starting_nodes = boundary_nodes[numpy.argsort(drainage_area[boundary_nodes])[-top_two_pc:]]
starting_nodes = boundary_nodes[numpy.argsort(
drainage_area[boundary_nodes])[-number_of_channels:]]
profile_IDs = []
for i in starting_nodes:
j = i
data_store = []
while 1:
data_store.append(j)
            supplying_nodes = numpy.where(flow_receiver == j)
from __future__ import print_function
import copy
import os
import sys
import time
import unittest
from nose.plugins.skip import SkipTest
from nose.tools import assert_raises
import numpy
from six.moves import xrange
import theano
from theano import tensor, config
from theano.sandbox import rng_mrg
from theano.sandbox.rng_mrg import MRG_RandomStreams
from theano.sandbox.cuda import cuda_available
from theano.tests import unittest_tools as utt
from theano.tests.unittest_tools import attr
if cuda_available:
from theano.sandbox.cuda import float32_shared_constructor
# TODO: test gpu
# Done in test_consistency_GPU_{serial,parallel}
# TODO: test MRG_RandomStreams
# Partly done in test_consistency_randomstreams
# TODO: test optimizer mrg_random_make_inplace
# TODO: make tests work when no flags gived. Now need:
# THEANO_FLAGS=device=gpu0,floatX=float32
# Partly done, in test_consistency_GPU_{serial,parallel}
mode = config.mode
mode_with_gpu = theano.compile.mode.get_default_mode().including('gpu')
utt.seed_rng()
# Results generated by Java code using L'Ecuyer et al.'s code, with:
# main seed: [12345]*6 (default)
# 12 streams
# 7 substreams for each stream
# 5 samples drawn from each substream
java_samples = numpy.loadtxt(os.path.join(os.path.split(theano.__file__)[0],
'sandbox',
'samples_MRG31k3p_12_7_5.txt'))
def test_deterministic():
seed = utt.fetch_seed()
sample_size = (10, 20)
test_use_cuda = [False]
if cuda_available:
test_use_cuda.append(True)
for use_cuda in test_use_cuda:
# print 'use_cuda =', use_cuda
R = MRG_RandomStreams(seed=seed, use_cuda=use_cuda)
u = R.uniform(size=sample_size)
f = theano.function([], u)
fsample1 = f()
fsample2 = f()
assert not numpy.allclose(fsample1, fsample2)
R2 = MRG_RandomStreams(seed=seed, use_cuda=use_cuda)
u2 = R2.uniform(size=sample_size)
g = theano.function([], u2)
gsample1 = g()
gsample2 = g()
assert numpy.allclose(fsample1, gsample1)
assert numpy.allclose(fsample2, gsample2)
def test_consistency_randomstreams():
"""
Verify that the random numbers generated by MRG_RandomStreams
are the same as the reference (Java) implementation by L'Ecuyer et al.
"""
seed = 12345
n_samples = 5
n_streams = 12
n_substreams = 7
test_use_cuda = [False]
if cuda_available:
test_use_cuda.append(True)
for use_cuda in test_use_cuda:
# print 'use_cuda =', use_cuda
samples = []
rng = MRG_RandomStreams(seed=seed, use_cuda=use_cuda)
for i in range(n_streams):
stream_samples = []
u = rng.uniform(size=(n_substreams,), nstreams=n_substreams)
f = theano.function([], u)
for j in range(n_samples):
s = f()
stream_samples.append(s)
stream_samples = numpy.array(stream_samples)
stream_samples = stream_samples.T.flatten()
samples.append(stream_samples)
samples = numpy.array(samples).flatten()
assert(numpy.allclose(samples, java_samples))
def test_consistency_cpu_serial():
"""
Verify that the random numbers generated by mrg_uniform, serially,
are the same as the reference (Java) implementation by L'Ecuyer et al.
"""
seed = 12345
n_samples = 5
n_streams = 12
n_substreams = 7
samples = []
curr_rstate = numpy.array([seed] * 6, dtype='int32')
for i in range(n_streams):
stream_rstate = curr_rstate.copy()
for j in range(n_substreams):
rstate = theano.shared(numpy.array([stream_rstate.copy()],
dtype='int32'))
new_rstate, sample = rng_mrg.mrg_uniform.new(rstate, ndim=None,
dtype=config.floatX,
size=(1,))
# Not really necessary, just mimicking
# rng_mrg.MRG_RandomStreams' behavior
sample.rstate = rstate
sample.update = (rstate, new_rstate)
rstate.default_update = new_rstate
f = theano.function([], sample)
for k in range(n_samples):
s = f()
samples.append(s)
# next substream
stream_rstate = rng_mrg.ff_2p72(stream_rstate)
# next stream
curr_rstate = rng_mrg.ff_2p134(curr_rstate)
samples = numpy.array(samples).flatten()
assert(numpy.allclose(samples, java_samples))
def test_consistency_cpu_parallel():
"""
Verify that the random numbers generated by mrg_uniform, in parallel,
are the same as the reference (Java) implementation by L'Ecuyer et al.
"""
seed = 12345
n_samples = 5
n_streams = 12
n_substreams = 7 # 7 samples will be drawn in parallel
samples = []
    curr_rstate = numpy.array([seed] * 6, dtype='int32')
import argparse
import numpy as np
import yaml
import matplotlib.pyplot as plt
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', type=str, help = 'yaml files')
parser.add_argument('-n', '--name', type=str, help = 'A name for figure title', default="")
parser.add_argument('-o', '--output_name', type=str, help = 'output filename', default="histogram")
parser.add_argument('--nbins', type=int, help = 'distribution nbins', default=20)
opt = parser.parse_args()
with open(opt.file, 'r') as stream:
score_dict = yaml.load(stream, Loader=yaml.FullLoader)
score_name = []
for filename in score_dict:
for metric_name in score_dict[filename]:
score_name.append(metric_name)
break
score_lists = {x:[] for x in score_name}
for filename in score_dict:
for metric_name in score_dict[filename]:
score_lists[metric_name].append(score_dict[filename][metric_name])
# row = 1. column = len(score_name)
fig, axs = plt.subplots(1, len(score_name), sharey=True)
for idx, metric_name in enumerate(score_lists):
mean_val = np.mean(score_lists[metric_name])
        std_val = np.std(score_lists[metric_name])
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_allclose, assert_equal,
assert_almost_equal, assert_array_equal,
assert_array_almost_equal)
from scipy.ndimage import convolve1d
from scipy.signal import savgol_coeffs, savgol_filter
from scipy.signal._savitzky_golay import _polyder
def check_polyder(p, m, expected):
dp = _polyder(p, m)
assert_array_equal(dp, expected)
def test_polyder():
cases = [
([5], 0, [5]),
([5], 1, [0]),
([3, 2, 1], 0, [3, 2, 1]),
([3, 2, 1], 1, [6, 2]),
([3, 2, 1], 2, [6]),
([3, 2, 1], 3, [0]),
([[3, 2, 1], [5, 6, 7]], 0, [[3, 2, 1], [5, 6, 7]]),
([[3, 2, 1], [5, 6, 7]], 1, [[6, 2], [10, 6]]),
([[3, 2, 1], [5, 6, 7]], 2, [[6], [10]]),
([[3, 2, 1], [5, 6, 7]], 3, [[0], [0]]),
]
for p, m, expected in cases:
check_polyder(np.array(p).T, m, np.array(expected).T)
#--------------------------------------------------------------------
# savgol_coeffs tests
#--------------------------------------------------------------------
def alt_sg_coeffs(window_length, polyorder, pos):
"""This is an alternative implementation of the SG coefficients.
It uses numpy.polyfit and numpy.polyval. The results should be
equivalent to those of savgol_coeffs(), but this implementation
is slower.
window_length should be odd.
"""
if pos is None:
pos = window_length // 2
t = np.arange(window_length)
unit = (t == pos).astype(int)
h = np.polyval(np.polyfit(t, unit, polyorder), t)
return h
def test_sg_coeffs_trivial():
# Test a trivial case of savgol_coeffs: polyorder = window_length - 1
h = savgol_coeffs(1, 0)
assert_allclose(h, [1])
h = savgol_coeffs(3, 2)
assert_allclose(h, [0, 1, 0], atol=1e-10)
h = savgol_coeffs(5, 4)
assert_allclose(h, [0, 0, 1, 0, 0], atol=1e-10)
h = savgol_coeffs(5, 4, pos=1)
assert_allclose(h, [0, 0, 0, 1, 0], atol=1e-10)
h = savgol_coeffs(5, 4, pos=1, use='dot')
assert_allclose(h, [0, 1, 0, 0, 0], atol=1e-10)
def compare_coeffs_to_alt(window_length, order):
# For the given window_length and order, compare the results
# of savgol_coeffs and alt_sg_coeffs for pos from 0 to window_length - 1.
# Also include pos=None.
for pos in [None] + list(range(window_length)):
h1 = savgol_coeffs(window_length, order, pos=pos, use='dot')
h2 = alt_sg_coeffs(window_length, order, pos=pos)
assert_allclose(h1, h2, atol=1e-10,
err_msg=("window_length = %d, order = %d, pos = %s" %
(window_length, order, pos)))
def test_sg_coeffs_compare():
# Compare savgol_coeffs() to alt_sg_coeffs().
for window_length in range(1, 8, 2):
for order in range(window_length):
compare_coeffs_to_alt(window_length, order)
def test_sg_coeffs_exact():
polyorder = 4
window_length = 9
halflen = window_length // 2
x = np.linspace(0, 21, 43)
delta = x[1] - x[0]
# The data is a cubic polynomial. We'll use an order 4
# SG filter, so the filtered values should equal the input data
# (except within half window_length of the edges).
y = 0.5 * x ** 3 - x
h = savgol_coeffs(window_length, polyorder)
y0 = convolve1d(y, h)
assert_allclose(y0[halflen:-halflen], y[halflen:-halflen])
# Check the same input, but use deriv=1. dy is the exact result.
dy = 1.5 * x ** 2 - 1
h = savgol_coeffs(window_length, polyorder, deriv=1, delta=delta)
y1 = convolve1d(y, h)
assert_allclose(y1[halflen:-halflen], dy[halflen:-halflen])
# Check the same input, but use deriv=2. d2y is the exact result.
d2y = 3.0 * x
h = savgol_coeffs(window_length, polyorder, deriv=2, delta=delta)
y2 = convolve1d(y, h)
assert_allclose(y2[halflen:-halflen], d2y[halflen:-halflen])
def test_sg_coeffs_deriv():
# The data in `x` is a sampled parabola, so using savgol_coeffs with an
# order 2 or higher polynomial should give exact results.
i = np.array([-2.0, 0.0, 2.0, 4.0, 6.0])
x = i ** 2 / 4
dx = i / 2
d2x = 0.5 * np.ones_like(i)
for pos in range(x.size):
coeffs0 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot')
assert_allclose(coeffs0.dot(x), x[pos], atol=1e-10)
coeffs1 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot', deriv=1)
assert_allclose(coeffs1.dot(x), dx[pos], atol=1e-10)
coeffs2 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot', deriv=2)
assert_allclose(coeffs2.dot(x), d2x[pos], atol=1e-10)
def test_sg_coeffs_large():
# Test that for large values of window_length and polyorder the array of
# coefficients returned is symmetric. The aim is to ensure that
# no potential numeric overflow occurs.
coeffs0 = savgol_coeffs(31, 9)
assert_array_almost_equal(coeffs0, coeffs0[::-1])
coeffs1 = savgol_coeffs(31, 9, deriv=1)
assert_array_almost_equal(coeffs1, -coeffs1[::-1])
#--------------------------------------------------------------------
# savgol_filter tests
#--------------------------------------------------------------------
def test_sg_filter_trivial():
""" Test some trivial edge cases for savgol_filter()."""
x = np.array([1.0])
y = savgol_filter(x, 1, 0)
assert_equal(y, [1.0])
# Input is a single value. With a window length of 3 and polyorder 1,
# the value in y is from the straight-line fit of (-1,0), (0,3) and
# (1, 0) at 0. This is just the average of the three values, hence 1.0.
x = np.array([3.0])
y = savgol_filter(x, 3, 1, mode='constant')
assert_almost_equal(y, [1.0], decimal=15)
x = np.array([3.0])
y = savgol_filter(x, 3, 1, mode='nearest')
assert_almost_equal(y, [3.0], decimal=15)
x = np.array([1.0] * 3)
y = savgol_filter(x, 3, 1, mode='wrap')
assert_almost_equal(y, [1.0, 1.0, 1.0], decimal=15)
def test_sg_filter_basic():
# Some basic test cases for savgol_filter().
x = np.array([1.0, 2.0, 1.0])
y = savgol_filter(x, 3, 1, mode='constant')
assert_allclose(y, [1.0, 4.0 / 3, 1.0])
y = savgol_filter(x, 3, 1, mode='mirror')
assert_allclose(y, [5.0 / 3, 4.0 / 3, 5.0 / 3])
y = savgol_filter(x, 3, 1, mode='wrap')
assert_allclose(y, [4.0 / 3, 4.0 / 3, 4.0 / 3])
def test_sg_filter_2d():
x = np.array([[1.0, 2.0, 1.0],
[2.0, 4.0, 2.0]])
expected = np.array([[1.0, 4.0 / 3, 1.0],
[2.0, 8.0 / 3, 2.0]])
y = savgol_filter(x, 3, 1, mode='constant')
assert_allclose(y, expected)
y = savgol_filter(x.T, 3, 1, mode='constant', axis=0)
assert_allclose(y, expected.T)
def test_sg_filter_interp_edges():
# Another test with low degree polynomial data, for which we can easily
# give the exact results. In this test, we use mode='interp', so
# savgol_filter should match the exact solution for the entire data set,
# including the edges.
t = np.linspace(-5, 5, 21)
delta = t[1] - t[0]
# Polynomial test data.
x = np.array([t,
3 * t ** 2,
t ** 3 - t])
dx = np.array([np.ones_like(t),
6 * t,
3 * t ** 2 - 1.0])
d2x = np.array([np.zeros_like(t),
                    6 * np.ones_like(t)
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import numpy as np
from scipy.stats import entropy, normaltest, mode, kurtosis, skew, pearsonr, moment
import pandas as pd
from collections import OrderedDict
from feature_extraction.helpers import *
from feature_extraction.type_detection import detect_field_type, data_type_to_general_type, data_types, general_types
import warnings
warnings.filterwarnings('ignore')
field_basic_features_list = [
{'name': 'fid', 'type': 'id'},
{'name': 'field_id', 'type': 'id'},
{'name': 'exists', 'type': 'boolean'},
{'name': 'length', 'type': 'numeric'}
]
for data_type in data_types:
field_basic_features_list.append({
'name': 'data_type_is_{}'.format(data_type), 'type': 'boolean'
})
for general_type in general_types:
field_basic_features_list.append({
'name': 'general_type_is_{}'.format(general_type), 'type': 'boolean'
})
field_existence_features_list = [
{'name': 'has_none', 'type': 'boolean'},
{'name': 'percentage_none', 'type': 'numeric'},
{'name': 'num_none', 'type': 'numeric'},
]
field_uniqueness_features_list = [
{'name': 'num_unique_elements', 'type': 'numeric'},
{'name': 'unique_percent', 'type': 'numeric'},
{'name': 'is_unique', 'type': 'boolean'}
]
field_c_statistical_features_list = [
{'name': 'list_entropy', 'type': 'numeric'},
{'name': 'mean_value_length', 'type': 'numeric'},
{'name': 'median_value_length', 'type': 'numeric'},
{'name': 'min_value_length', 'type': 'numeric'},
{'name': 'max_value_length', 'type': 'numeric'},
{'name': 'std_value_length', 'type': 'numeric'},
{'name': 'percentage_of_mode', 'type': 'numeric'},
]
field_q_statistical_features_list = [
{'name': 'mean', 'type': 'numeric'},
{'name': 'normalized_mean', 'type': 'numeric'},
{'name': 'median', 'type': 'numeric'},
{'name': 'normalized_median', 'type': 'numeric'},
{'name': 'var', 'type': 'numeric'},
{'name': 'std', 'type': 'numeric'},
{'name': 'coeff_var', 'type': 'numeric'},
{'name': 'min', 'type': 'numeric'},
{'name': 'max', 'type': 'numeric'},
{'name': 'range', 'type': 'numeric'},
{'name': 'normalized_range', 'type': 'numeric'},
{'name': 'entropy', 'type': 'numeric'},
{'name': 'gini', 'type': 'numeric'},
{'name': 'q25', 'type': 'numeric'},
{'name': 'q75', 'type': 'numeric'},
{'name': 'med_abs_dev', 'type': 'numeric'},
{'name': 'avg_abs_dev', 'type': 'numeric'},
{'name': 'quant_coeff_disp', 'type': 'numeric'},
{'name': 'skewness', 'type': 'numeric'},
{'name': 'kurtosis', 'type': 'numeric'},
{'name': 'moment_5', 'type': 'numeric'},
{'name': 'moment_6', 'type': 'numeric'},
{'name': 'moment_7', 'type': 'numeric'},
{'name': 'moment_8', 'type': 'numeric'},
{'name': 'moment_9', 'type': 'numeric'},
{'name': 'moment_10', 'type': 'numeric'},
{'name': 'percent_outliers_15iqr', 'type': 'numeric'},
{'name': 'percent_outliers_3iqr', 'type': 'numeric'},
{'name': 'percent_outliers_1_99', 'type': 'numeric'},
{'name': 'percent_outliers_3std', 'type': 'numeric'},
{'name': 'has_outliers_15iqr', 'type': 'boolean'},
{'name': 'has_outliers_3iqr', 'type': 'boolean'},
{'name': 'has_outliers_1_99', 'type': 'boolean'},
{'name': 'has_outliers_3std', 'type': 'boolean'},
{'name': 'normality_statistic', 'type': 'numeric'},
{'name': 'normality_p', 'type': 'numeric'},
{'name': 'is_normal_5', 'type': 'boolean'},
{'name': 'is_normal_1', 'type': 'boolean'},
]
field_name_features_list = [
{'name': 'field_name_length', 'type': 'numeric'},
{'name': 'x_in_name', 'type': 'boolean'},
{'name': 'y_in_name', 'type': 'boolean'},
{'name': 'id_in_name', 'type': 'boolean'},
{'name': 'time_in_name', 'type': 'boolean'},
{'name': 'digit_in_name', 'type': 'boolean'},
{'name': 'dollar_in_name', 'type': 'boolean'},
{'name': 'pound_in_name', 'type': 'boolean'},
{'name': 'euro_in_name', 'type': 'boolean'},
{'name': 'yen_in_name', 'type': 'boolean'},
{'name': 'first_char_uppercase_name', 'type': 'boolean'},
{'name': 'num_uppercase_characters', 'type': 'numeric'},
{'name': 'space_in_name', 'type': 'boolean'},
{'name': 'number_of_words_in_name', 'type': 'numeric'},
]
field_sequence_features_list = [
{'name': 'is_sorted', 'type': 'boolean'},
{'name': 'is_monotonic', 'type': 'boolean'},
{'name': 'sortedness', 'type': 'numeric'},
{'name': 'lin_space_sequence_coeff', 'type': 'numeric'},
{'name': 'log_space_sequence_coeff', 'type': 'numeric'},
{'name': 'is_lin_space', 'type': 'boolean'},
{'name': 'is_log_space', 'type': 'boolean'},
]
all_field_features_list = \
field_basic_features_list + \
field_existence_features_list + \
field_uniqueness_features_list + \
field_c_statistical_features_list + \
field_q_statistical_features_list + \
field_name_features_list + \
field_sequence_features_list
all_field_features_list_names = [x['name'] for x in all_field_features_list]
def get_existence_features(v):
r = OrderedDict([(f['name'], None) for f in field_existence_features_list])
if not len(v):
return r
num_none = sum(1 for e in v if e == 'None')
r['num_none'] = num_none
r['percentage_none'] = num_none / len(v)
r['has_none'] = (num_none > 0)
return r
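# For instance (illustrative input), a field parsed as ['3.2', 'None', '4.1', 'None']
# yields num_none=2, percentage_none=0.5 and has_none=True; missing entries are
# assumed to arrive as the literal string 'None' from the upstream parser.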
# Sequence Properties
def get_uniqueness_features(v, field_type, field_general_type):
r = OrderedDict([(f['name'], None)
for f in field_uniqueness_features_list])
if not len(v):
return r
if field_general_type == 'c' or field_type == 'integer':
unique_elements = get_unique(v)
r = {}
r['num_unique_elements'] = unique_elements.size
r['unique_percent'] = (r['num_unique_elements'] / len(v))
r['is_unique'] = (r['num_unique_elements'] == len(v))
return r
def get_statistical_features(v, field_type, field_general_type):
r = OrderedDict([(f['name'], None)
for f in field_c_statistical_features_list + field_q_statistical_features_list])
if not len(v):
return r
if field_general_type == 'c':
r['list_entropy'] = list_entropy(v)
value_lengths = [len(x) for x in v]
r['mean_value_length'] = np.mean(value_lengths)
r['median_value_length'] = np.median(value_lengths)
r['min_value_length'] = np.min(value_lengths)
r['max_value_length'] = np.max(value_lengths)
r['std_value_length'] = np.std(value_lengths)
r['percentage_of_mode'] = (pd.Series(v).value_counts().max() / len(v))
if field_general_type in 'q':
sample_mean = np.mean(v)
sample_median = np.median(v)
sample_var = np.var(v)
sample_min = np.min(v)
sample_max = np.max(v)
sample_std = np.std(v)
        # np.percentile expects percentages in the range [0, 100]
        q1, q25, q75, q99 = np.percentile(v, [1, 25, 75, 99])
iqr = q75 - q25
r['mean'] = sample_mean
r['normalized_mean'] = sample_mean / sample_max
r['median'] = sample_median
r['normalized_median'] = sample_median / sample_max
r['var'] = sample_var
r['std'] = sample_std
r['coeff_var'] = (sample_mean / sample_var) if sample_var else None
r['min'] = sample_min
r['max'] = sample_max
r['range'] = r['max'] - r['min']
r['normalized_range'] = (r['max'] - r['min']) / \
sample_mean if sample_mean else None
r['entropy'] = entropy(v)
r['gini'] = gini(v)
r['q25'] = q25
r['q75'] = q75
r['med_abs_dev'] = np.median(np.absolute(v - sample_median))
r['avg_abs_dev'] = np.mean(np.absolute(v - sample_mean))
r['quant_coeff_disp'] = (q75 - q25) / (q75 + q25)
r['coeff_var'] = sample_var / sample_mean
r['skewness'] = skew(v)
r['kurtosis'] = kurtosis(v)
r['moment_5'] = moment(v, moment=5)
r['moment_6'] = moment(v, moment=6)
r['moment_7'] = moment(v, moment=7)
r['moment_8'] = moment(v, moment=8)
r['moment_9'] = moment(v, moment=9)
r['moment_10'] = moment(v, moment=10)
# Outliers
outliers_15iqr = np.logical_or(
v < (q25 - 1.5 * iqr), v > (q75 + 1.5 * iqr))
outliers_3iqr = np.logical_or(v < (q25 - 3 * iqr), v > (q75 + 3 * iqr))
outliers_1_99 = np.logical_or(v < q1, v > q99)
        outliers_3std = np.logical_or(
            v < (sample_mean - 3 * sample_std),
            v > (sample_mean + 3 * sample_std))
r['percent_outliers_15iqr'] = np.sum(outliers_15iqr) / len(v)
r['percent_outliers_3iqr'] = | np.sum(outliers_3iqr) | numpy.sum |
import numpy as np
import pandas as pd
from numpy.testing import assert_, assert_equal, assert_allclose, assert_raises
from statsmodels.tsa.arima import specification, params
def test_init():
# Test initialization of the params
# Basic test, with 1 of each parameter
exog = pd.DataFrame([[0]], columns=['a'])
spec = specification.SARIMAXSpecification(
exog=exog, order=(1, 1, 1), seasonal_order=(1, 1, 1, 4))
p = params.SARIMAXParams(spec=spec)
# Test things copied over from spec
assert_equal(p.spec, spec)
assert_equal(p.exog_names, ['a'])
assert_equal(p.ar_names, ['ar.L1'])
assert_equal(p.ma_names, ['ma.L1'])
assert_equal(p.seasonal_ar_names, ['ar.S.L4'])
assert_equal(p.seasonal_ma_names, ['ma.S.L4'])
assert_equal(p.param_names, ['a', 'ar.L1', 'ma.L1', 'ar.S.L4', 'ma.S.L4',
'sigma2'])
assert_equal(p.k_exog_params, 1)
assert_equal(p.k_ar_params, 1)
assert_equal(p.k_ma_params, 1)
assert_equal(p.k_seasonal_ar_params, 1)
assert_equal(p.k_seasonal_ma_params, 1)
assert_equal(p.k_params, 6)
# Initial parameters should all be NaN
assert_equal(p.params, np.nan)
assert_equal(p.ar_params, [np.nan])
assert_equal(p.ma_params, [np.nan])
assert_equal(p.seasonal_ar_params, [np.nan])
assert_equal(p.seasonal_ma_params, [np.nan])
assert_equal(p.sigma2, np.nan)
assert_equal(p.ar_poly.coef, np.r_[1, np.nan])
assert_equal(p.ma_poly.coef, np.r_[1, np.nan])
assert_equal(p.seasonal_ar_poly.coef, np.r_[1, 0, 0, 0, np.nan])
assert_equal(p.seasonal_ma_poly.coef, np.r_[1, 0, 0, 0, np.nan])
assert_equal(p.reduced_ar_poly.coef, np.r_[1, [np.nan] * 5])
assert_equal(p.reduced_ma_poly.coef, np.r_[1, [np.nan] * 5])
# Test other properties, methods
assert_(not p.is_complete)
assert_(not p.is_valid)
assert_raises(ValueError, p.__getattribute__, 'is_stationary')
assert_raises(ValueError, p.__getattribute__, 'is_invertible')
desired = {
'exog_params': [np.nan],
'ar_params': [np.nan],
'ma_params': [np.nan],
'seasonal_ar_params': [np.nan],
'seasonal_ma_params': [np.nan],
'sigma2': np.nan}
assert_equal(p.to_dict(), desired)
desired = pd.Series([np.nan] * spec.k_params, index=spec.param_names)
assert_allclose(p.to_pandas(), desired)
# Test with different numbers of parameters for each
exog = pd.DataFrame([[0, 0]], columns=['a', 'b'])
spec = specification.SARIMAXSpecification(
exog=exog, order=(3, 1, 2), seasonal_order=(5, 1, 6, 4))
p = params.SARIMAXParams(spec=spec)
# No real need to test names here, since they are already tested above for
# the 1-param case, and tested more extensively in test for
# SARIMAXSpecification
assert_equal(p.k_exog_params, 2)
assert_equal(p.k_ar_params, 3)
assert_equal(p.k_ma_params, 2)
assert_equal(p.k_seasonal_ar_params, 5)
assert_equal(p.k_seasonal_ma_params, 6)
assert_equal(p.k_params, 2 + 3 + 2 + 5 + 6 + 1)
def test_set_params_single():
# Test setting parameters directly (i.e. we test setting the AR/MA
# parameters by setting the lag polynomials elsewhere)
# Here each type has only a single parameters
exog = pd.DataFrame([[0]], columns=['a'])
spec = specification.SARIMAXSpecification(
exog=exog, order=(1, 1, 1), seasonal_order=(1, 1, 1, 4))
p = params.SARIMAXParams(spec=spec)
def check(is_stationary='raise', is_invertible='raise'):
assert_(not p.is_complete)
assert_(not p.is_valid)
if is_stationary == 'raise':
assert_raises(ValueError, p.__getattribute__, 'is_stationary')
else:
assert_equal(p.is_stationary, is_stationary)
if is_invertible == 'raise':
assert_raises(ValueError, p.__getattribute__, 'is_invertible')
else:
assert_equal(p.is_invertible, is_invertible)
# Set params one at a time, as scalars
p.exog_params = -6.
check()
p.ar_params = -5.
check()
p.ma_params = -4.
check()
p.seasonal_ar_params = -3.
check(is_stationary=False)
p.seasonal_ma_params = -2.
check(is_stationary=False, is_invertible=False)
p.sigma2 = -1.
# Finally, we have a complete set.
assert_(p.is_complete)
# But still not valid
assert_(not p.is_valid)
assert_equal(p.params, [-6, -5, -4, -3, -2, -1])
assert_equal(p.exog_params, [-6])
assert_equal(p.ar_params, [-5])
assert_equal(p.ma_params, [-4])
assert_equal(p.seasonal_ar_params, [-3])
assert_equal(p.seasonal_ma_params, [-2])
assert_equal(p.sigma2, -1.)
# Lag polynomials
assert_equal(p.ar_poly.coef, np.r_[1, 5])
assert_equal(p.ma_poly.coef, np.r_[1, -4])
assert_equal(p.seasonal_ar_poly.coef, np.r_[1, 0, 0, 0, 3])
assert_equal(p.seasonal_ma_poly.coef, np.r_[1, 0, 0, 0, -2])
# (1 - a L) (1 - b L^4) = (1 - a L - b L^4 + a b L^5)
assert_equal(p.reduced_ar_poly.coef, np.r_[1, 5, 0, 0, 3, 15])
# (1 + a L) (1 + b L^4) = (1 + a L + b L^4 + a b L^5)
assert_equal(p.reduced_ma_poly.coef, np.r_[1, -4, 0, 0, -2, 8])
# Override again, one at a time, now using lists
p.exog_params = [1.]
p.ar_params = [2.]
p.ma_params = [3.]
p.seasonal_ar_params = [4.]
p.seasonal_ma_params = [5.]
p.sigma2 = [6.]
p.params = [1, 2, 3, 4, 5, 6]
assert_equal(p.params, [1, 2, 3, 4, 5, 6])
assert_equal(p.exog_params, [1])
| assert_equal(p.ar_params, [2]) | numpy.testing.assert_equal |
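# Side check (illustrative, outside the test module): the reduced lag-polynomial comments
# above, e.g. (1 + 5L)(1 + 3L^4), can be verified with numpy's polynomial helpers. The AR
# parameter enters with flipped sign, matching the coef arrays asserted in the test.
import numpy as np
from numpy.polynomial import polynomial as P
ar = np.r_[1.0, 5.0]                           # 1 + 5 L
seasonal_ar = np.r_[1.0, 0.0, 0.0, 0.0, 3.0]   # 1 + 3 L^4
print(P.polymul(ar, seasonal_ar))              # -> [ 1.  5.  0.  0.  3. 15.]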
# Copyright 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
import numpy as np
from scipy.optimize import curve_fit
from auspex.log import logger
from copy import copy
import matplotlib.pyplot as plt
from .fits import AuspexFit, Auspex2DFit
from .signal_analysis import KT_estimation
class RabiAmpFit(AuspexFit):
"""A fit to a Rabi amplitude curve, assuming a cosine model.
"""
xlabel = "Amplitude"
ylabel = r"<$\sigma_z$>"
title = "Rabi Amp Fit"
@staticmethod
def _model(x, *p):
return p[0] - p[1]*np.cos(2*np.pi*p[2]*(x - p[3]))
def _initial_guess(self):
#seed Rabi frequency from largest FFT component
N = len(self.ypts)
yfft = np.fft.fft(self.ypts)
f_max_ind = np.argmax(np.abs(yfft[1:N//2]))
f_0 = 0.5 * max([1, f_max_ind]) / self.xpts[-1]
amp_0 = 0.5*(self.ypts.max() - self.ypts.min())
offset_0 = np.mean(self.ypts)
phase_0 = 0
if self.ypts[N//2 - 1] > offset_0:
amp_0 = -amp_0
return [offset_0, amp_0, f_0, phase_0]
def _fit_dict(self, p):
return {"y0": p[0],
"Api": p[1],
"f": p[2],
"phi": p[3]}
def __str__(self):
return "y0 - Api*cos(2*pi*f*(t - phi))"
@property
def pi_amp(self):
"""Returns the pi-pulse amplitude of the fit.
"""
return 0.5/self.fit_params["f"]
def annotation(self):
return r"$A_\pi$ = {0:.2e} {1} {2:.2e}".format(self.pi_amp, chr(177), self.fit_errors["Api"])
class RabiWidthFit(AuspexFit):
"""Fit to a single-frequency decaying cosine for fitting Rabi-vs-time experiments
"""
xlabel = "Delay"
ylabel = r"<$\sigma_z$>"
title = "Rabi Width Fit"
@staticmethod
def _model(x, *p):
return p[0] + p[1]*np.exp(-x/p[2])*np.cos(2*np.pi*p[3]*(x - p[4]))
def _initial_guess(self):
frabi, Tcs, amps = KT_estimation(self.ypts-np.mean(self.ypts), self.xpts, 1)
        offset = np.mean(self.ypts)   # baseline (y-offset) seed
amp = np.max(self.ypts)
trabi = self.xpts[np.size(self.ypts) // 3]# assume Trabi is 1/3 of the scan
phase = 90.0
return [offset, amp, trabi, frabi[0], phase]
def _fit_dict(self, p):
return {"y0": p[0],
"A": p[1],
'T': p[2],
"f": p[3],
"phi": p[4]}
def __str__(self):
return "y0 + A*exp(-x/T)*cos(2*pi*f*(t - phi))"
@property
def t_rabi(self):
return self.fit_params["T"]
def annotation(self):
return r"$T_\pi$ = {0:.2e} {1} {2:.2e}".format(self.fit_params["T"], chr(177), self.fit_errors["T"])
class T1Fit(AuspexFit):
"""Fit to a decaying exponential for T1 measurement experiments.
"""
xlabel = "Delay"
ylabel = r"<$\sigma_z$>"
title = r"$T_1$ Fit"
@staticmethod
def _model(x, *p):
return p[0]*np.exp(-x/p[1]) + p[2]
def _initial_guess(self):
## Initial guess using method of linear regression via integral equations
## https://www.scribd.com/doc/14674814/Regressions-et-equations-integrales
N = len(self.xpts)
S = np.zeros(N)
for j in range(2, N):
S[j] = S[j-1] + 0.5*((self.ypts[j] + self.ypts[j-1]) *
(self.xpts[j] - self.xpts[j-1]))
xs = self.xpts - self.xpts[0]
ys = self.ypts - self.ypts[0]
M = np.array([[np.sum(xs**2), np.sum(xs * S)], [np.sum(xs * S), np.sum(S**2)]])
B1 = (np.linalg.inv(M) @ np.array([np.sum(ys * xs), np.sum(ys * S)]).T)[1]
theta = np.exp(B1 * self.xpts)
M2 = np.array([[N, np.sum(theta)], [np.sum(theta), np.sum(theta**2)]])
A = np.linalg.inv(M2) @ np.array([np.sum(self.ypts), np.sum(self.ypts * theta)]).T
return [A[1], -1.0/B1, A[0]]
def _fit_dict(self, p):
return {"A": p[0], "T1": p[1], "A0": p[2]}
def __str__(self):
return "A0 + A*exp(-t/T1)"
@property
def T1(self):
"""Return the measured T1 (i.e. decay constant of exponential).
"""
return self.fit_params["T1"]
def make_plots(self):
"""Create plot on both linear and semilog scale
"""
logger.info("Semilog plot of |1> state probability requires calibrated data.")
plt.figure(figsize=(2*6.4, 4.8))
plt.subplot(121)
plt.plot(self.xpts, self.ypts, ".", markersize=15, label="Data")
plt.plot(self.xpts, self.model(self.xpts), "-", linewidth=3, label="Fit")
plt.xlabel(self.xlabel, fontsize=14)
plt.ylabel(self.ylabel, fontsize=14)
plt.annotate(self.annotation(), xy=(0.4, 0.10), xycoords='axes fraction', size=12)
plt.subplot(122)
plt.semilogy(self.xpts, -1/2*(self.ypts - self.fit_params["A0"]), ".", markersize=15, label="Data")
plt.semilogy(self.xpts, -1/2*(self.model(self.xpts) - self.fit_params["A0"]), "-", linewidth=3, label="Fit")
plt.xlabel(self.xlabel, fontsize=14)
plt.ylabel('|1> probability', fontsize=14)
plt.suptitle(self.title, fontsize=14)
def annotation(self):
return r"$T_1$ = {0:.2e} {1} {2:.2e}".format(self.fit_params["T1"], chr(177), self.fit_errors["T1"])
class RamseyFit(AuspexFit):
"""Fit to a Ramsey experiment using either a one or two frequency decaying
sine model.
"""
xlabel = "Delay"
ylabel = r"<$\sigma_z$>"
title = "Ramsey Fit"
def __init__(self, xpts, ypts, two_freqs=True, AIC=True, make_plots=False, force=False, ax=None):
"""One or two frequency Ramsey experiment fit. If a two-frequency fit is selected
by the user or by comparing AIC scores, fit parameters are returned as tuples instead
of single numbers.
Args:
xpts (numpy.array): Time data points.
ypts (numpy.array): Qubit measurements.
two_freqs (Bool): If true, attempt a two-frequency fit of the data.
AIC (Bool): Decide between one and two frequency fits using the Akaike
information criterion.
make_plots (Bool): Display a plot of data and fit result.
ax (Axes, optional): Axes on which to draw plot. If None, new figure is created
force (Bool): Force the selection of a two-frequency fit regardless of AIC score.
"""
self.AIC = AIC
self.dict_option = two_freqs
self.two_freqs = two_freqs
self.force = force
self.plots = make_plots
self.ax = ax
assert len(xpts) == len(ypts), "Length of X and Y points must match!"
self.xpts = xpts
self.ypts = ypts
self._do_fit()
def _initial_guess_1f(self):
freqs, Tcs, amps = KT_estimation(self.ypts-np.mean(self.ypts), self.xpts, 1)
return [freqs[0], abs(amps[0]), Tcs[0], np.angle(amps[0]), np.mean(self.ypts)]
def _initial_guess_2f(self):
freqs, Tcs, amps = KT_estimation(self.ypts-np.mean(self.ypts), self.xpts, 2)
return [*freqs, *abs(amps), *Tcs, *np.angle(amps), np.mean(self.ypts)]
@staticmethod
def _ramsey_1f(x, f, A, tau, phi, y0):
return A*np.exp(-x/tau)*np.cos(2*np.pi*f*x + phi) + y0
@staticmethod
def _model_2f(x, *p):
return (RamseyFit._ramsey_1f(x, p[0], p[2], p[4], p[6], p[8]) + RamseyFit._ramsey_1f(x, p[1], p[3], p[5], p[7], p[8]))
@staticmethod
def _model_1f(x, *p):
return RamseyFit._ramsey_1f(x, p[0], p[1], p[2], p[3], p[4])
def _aicc(self, e, k, n):
return 2*k+e+(2*k*(k+1))/(n-k-1)
def _do_fit(self):
if self.two_freqs:
self.dict_option = True
self._initial_guess = self._initial_guess_2f
self._model = self._model_2f
try:
super()._do_fit()
two_freq_chi2 = self.sq_error
            except Exception:
self.two_freqs = False
logger.info("Two-frequency fit failed. Trying single-frequency fit.")
if self.two_freqs and self.AIC:
#Compare the one and two frequency fits
self.dict_option = False
self._initial_guess = self._initial_guess_1f
self._model = self._model_1f
super()._do_fit()
one_freq_chi2 = self.sq_error
aic = self._aicc(two_freq_chi2, 9, len(self.xpts)) - self._aicc(one_freq_chi2, 5, len(self.xpts))
if aic > 0 and not self.force:
self.two_freqs = False
rl = 100*np.exp(-aic/2)
logger.info(f"Selecting one-frequency fit with relative likelihood = {rl:.2f}%")
if rl>33:
logger.info("Relative likelihood of 2nd frequency high, take more averages or set force = True.")
else:
self.dict_option = True
self._initial_guess = self._initial_guess_2f
self._model = self._model_2f
super()._do_fit()
if not self.two_freqs:
self.dict_option = False
self._initial_guess = self._initial_guess_1f
self._model = self._model_1f
super()._do_fit()
if self.plots:
self.make_plots()
def annotation(self):
if self.two_freqs:
return r"$T_2$ = {0:.2e} {1} {2:.2e} "'\n'"$T_2$ = {3:.2e} {4} {5:.2e}".format(self.fit_params["tau1"], chr(177), self.fit_errors["tau1"], self.fit_params["tau2"], chr(177), self.fit_errors["tau2"])
else:
return r"$T_2$ = {0:.2e} {1} {2:.2e}".format(self.fit_params["tau"], chr(177), self.fit_errors["tau"])
@property
def T2(self):
if self.two_freqs:
return self.fit_params["tau1"], self.fit_params["tau2"]
else:
return self.fit_params["tau"]
@property
def ramsey_freq(self):
if self.two_freqs:
return self.fit_params["f1"], self.fit_params["f2"]
else:
return self.fit_params["f"]
def _fit_dict(self, p):
if self.dict_option:
return {"f1": p[0],
"A1": p[2],
"tau1": p[4],
"phi1": p[6],
"f2": p[1],
"A2": p[3],
"tau2": p[5],
"phi2": p[7],
"y0": p[8]}
else:
return {"f": p[0],
"A": p[1],
"tau": p[2],
"phi": p[3],
"y0": p[4]}
class SingleQubitRBFit(AuspexFit):
"""Fit to an RB decay curve using the model A*(r^n) + B
"""
ylabel = r"<$\sigma_z$>"
title = "Single Qubit RB Fit"
def __init__(self, lengths, data, make_plots=False, log_scale_x=True, smart_guess=True, bounded_fit=True, ax=None):
self.lengths = sorted(list(set(lengths)))
repeats = len(data) // len(self.lengths)
xpts = np.array(self.lengths)
ypts = np.mean(np.reshape(data,(len(self.lengths),repeats)),1)
self.data = data
self.data_points = np.reshape(data,(len(self.lengths),repeats))
self.errors = np.std(self.data_points, 1)
self.log_scale_x = log_scale_x
self.ax = ax
self.smart_guess = smart_guess
if log_scale_x:
self.xlabel = r"$log_2$ Clifford Number"
else:
self.xlabel = "Clifford Number"
if bounded_fit:
self.bounds = ((0, -np.inf, 0), (1, np.inf, 1))
super().__init__(xpts, ypts, make_plots=make_plots, ax=ax)
@staticmethod
def _model(x, *p):
return p[0] * (1-p[1])**x + p[2]
def _initial_guess(self):
if self.smart_guess:
## Initial guess using method of linear regression via integral equations
## https://www.scribd.com/doc/14674814/Regressions-et-equations-integrales
N = len(self.xpts)
S = np.zeros(N)
for j in range(2, N):
S[j] = S[j-1] + 0.5*((self.ypts[j] + self.ypts[j-1]) *
(self.xpts[j] - self.xpts[j-1]))
xs = self.xpts - self.xpts[0]
ys = self.ypts - self.ypts[0]
M = np.array([[np.sum(xs**2), np.sum(xs * S)],
[np.sum(xs * S), np.sum(S**2)]])
B1 = (np.linalg.inv(M) @ np.array([ | np.sum(ys * xs) | numpy.sum |
from __future__ import annotations
from dataclasses import dataclass
from functools import reduce
from typing import Literal
import numpy as np
from numpy.typing import NDArray
from chemex.configuration.data import RelaxationDataSettings
from chemex.configuration.experiment import CpmgSettingsEvenNcycs
from chemex.configuration.experiment import ExperimentConfig
from chemex.configuration.experiment import ToBeFitted
from chemex.containers.data import Data
from chemex.containers.dataset import load_relaxation_dataset
from chemex.experiments.configurations import configurations
from chemex.experiments.descriptions import descriptions
from chemex.experiments.factories import Creators
from chemex.experiments.factories import factories
from chemex.filterers import PlanesFilterer
from chemex.nmr.liouvillian import Basis
from chemex.nmr.liouvillian import LiouvillianIS
from chemex.nmr.spectrometer import Spectrometer
from chemex.parameters.spin_system import SpinSystem
from chemex.plotters import CpmgPlotter
from chemex.printers.data import CpmgPrinter
# Type definitions
NDArrayFloat = NDArray[np.float_]
NDArrayBool = NDArray[np.bool_]
EXPERIMENT_NAME = "cpmg_hn_dq_zq"
class CpmgHNDqZqSettings(CpmgSettingsEvenNcycs):
name: Literal["cpmg_hn_dq_zq"]
time_t2: float
carrier_h: float
carrier_n: float
pw90_h: float
pw90_n: float
dq_flg: bool
observed_state: Literal["a", "b", "c", "d"] = "a"
@property
def detection(self) -> str:
if self.dq_flg:
return f"[2ixsx_{self.observed_state}] - [2iysy_{self.observed_state}]"
else:
return f"[2ixsx_{self.observed_state}] + [2iysy_{self.observed_state}]"
class CpmgHNDqZqConfig(ExperimentConfig[CpmgHNDqZqSettings, RelaxationDataSettings]):
@property
def to_be_fitted(self) -> ToBeFitted:
state = self.experiment.observed_state
return ToBeFitted(
rates=[f"r2mq_is_{state}", f"mu_is_{state}"],
model_free=[f"tauc_{state}", f"s2_{state}"],
)
def build_spectrometer(
config: CpmgHNDqZqConfig, spin_system: SpinSystem
) -> Spectrometer:
settings = config.experiment
conditions = config.conditions
basis = Basis(type="ixyzsxyz", spin_system="nh")
liouvillian = LiouvillianIS(spin_system, basis, conditions)
spectrometer = Spectrometer(liouvillian)
spectrometer.carrier_i = settings.carrier_n
spectrometer.carrier_s = settings.carrier_h
spectrometer.b1_i = 1 / (4.0 * settings.pw90_n)
spectrometer.b1_s = 1 / (4.0 * settings.pw90_h)
spectrometer.detection = settings.detection
return spectrometer
@dataclass
class CpmgHNDqZqSequence:
settings: CpmgHNDqZqSettings
def _get_tau_cps(self, ncycs: np.ndarray) -> dict[float, float]:
ncycs_no_ref = ncycs[ncycs > 0.0]
return dict(
zip(
ncycs_no_ref,
self.settings.time_t2 / (4.0 * ncycs_no_ref)
- 7.0 / 3.0 * self.settings.pw90_n,
)
)
def _get_phases(self, ncyc: float) -> tuple[np.ndarray, np.ndarray]:
nu_cpmg = ncyc / self.settings.time_t2
if nu_cpmg < 51.0:
cp_phases1 = [0, 1, 0, 1]
cp_phases2 = [1, 0, 1, 0]
elif nu_cpmg < 255.0:
cp_phases1 = [0]
cp_phases2 = [1]
else:
cp_phases1 = [0, 1, 0, 1, 1, 0, 1, 0]
cp_phases2 = [1, 0, 1, 0, 0, 1, 0, 1]
indexes = np.arange(2 * int(ncyc))
phases1 = np.take(cp_phases1, | np.flip(indexes) | numpy.flip |
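# Standalone sketch of the phase-cycle selection above (illustrative only). The original
# np.take call is truncated, so mode="wrap" is an assumption made here to tile the short
# phase cycle over the 2*ncyc refocusing pulses.
import numpy as np
time_t2, ncyc = 0.04, 6.0
nu_cpmg = ncyc / time_t2                       # 150 Hz -> the single-phase branch
cp_phases1 = [0] if 51.0 <= nu_cpmg < 255.0 else [0, 1, 0, 1]
indexes = np.arange(2 * int(ncyc))
phases1 = np.take(cp_phases1, np.flip(indexes), mode="wrap")
print(phases1)                                 # twelve pulse phases, all 0 for this branch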
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
def burgers( method, nx, nt, t_max, bc, param, plot = False ):
"""
This function solves the time-dependent inviscid Burgers equation.
It also solves sensitivity equations for derivatives of the PDE solution
with respect to parameters mu1 and mu2, and computes the values/gradients
of some functions of the PDE solution.
The code for solving Burgers' equation is ported from "BURGERS_TIME_INVISCID"
by <NAME> and <NAME>.
Discussion:
A number of solution methods are available for the user to choose.
Modified:
4 February 2020
Author:
<NAME>
Inputs:
Integer METHOD.
1, Upwind nonconservative;
2, Upwind conservative;
3, Godunov.
Integer NX, the number of nodes.
Integer NT, the number of time steps.
Real T_MAX, the maximum time.
Integer BC, defines the boundary conditions.
0, Dirichlet at A, Dirichlet at B.
1, Dirichlet at A, nothing/Neumann at B.
Array PARAM, contains two parameters:
param(1): left-end boundary condition.
param(2): parameter in source term.
Bool plot (false by default), enables/disables plot of solution over time.
Output:
Real U(5, NT+1, NX). U[0] is the solution at each time and node.
U[1], U[2] are solution derivatives w.r.t. mu1, mu2 at each time and node.
U[3] is the time derivative of the solution at each time and node.
U[4] is the spatial derivative of the solution at each time and node.
Real Dsav(5, NT+1, NParam+1+NX), stores [parameter value, time, quantity].
Real Q2, value of total kinetic energy, 0.5 * \int_x \int_t w(x, t, \mu)^2.
Real gradQ2(3), gradient of Q2(t, \mu).
Real Q3alt, value of integrated kinetic energy at right endpoint,
0.5 * \int_t w(100, t, \mu)^2.
Real gradQ3alt(3), gradient of Q3alt(t, \mu).
Reference:
Burgers equation by <NAME>
Test problem from
A Trajectory Piecewise-Linear Approach to Model Order Reduction
of Nonlinear Dynamical Systems, by <NAME>
and
Model reduction of dynamcial systems on nonlinear manifolds using
deep convolutional autoencoders by <NAME> and <NAME>
Demo:
burgers ( 2, 256, 500, 35, 1, [4.3, 0.021] );
"""
def f(u): return 0.5 * u**2
def df(u): return u
    def nf(u, v):
        if np.ndim(u) == 0:
            u = np.array([u])
        if np.ndim(v) == 0:
            v = np.array([v])
        # Work on a copy: u may be a view of the solution array, and mutating it in
        # place would corrupt the flux computation of the Godunov step.
        ustar = np.copy(u)
        for i in range(len(u)):  # i = 1:len(u)
if u[i] >= v[i]:
if ( u[i] + v[i] ) / 2 > 0:
ustar[i] = u[i]
else:
ustar[i] = v[i]
else:
if u[i] > 0:
ustar[i] = u[i]
elif v[i] < 0:
ustar[i] = v[i]
else:
ustar[i] = 0
return 0.5 * ustar**2
a, b = 0, 100
dx = ( b - a ) / nx
x = np.linspace(a, b, nx)
dt = t_max / nt
g = 0.02 * np.exp( param[1] * x )
# Set up the initial solution values and parameter gradient values.
U = np.zeros( (5, nt + 1, nx) )
Dsav = np.zeros( (5, nt + 1, nx + len(param) + 1) )
u0 = np.ones_like(x)
umu10 = np.zeros_like(x)
umu20 = np.zeros_like(x)
ut0 = np.zeros_like(x)
ux0 = np.zeros_like(x)
# impose BC at left endpoint
u0[0] = param[0]
umu10[0] = 1
umu20[0] = 0
ut0[0] = 0
ux0[0] = 0
# Could vectorize this more, but I think it's more readable like this.
U[0][0, :] = u0
U[1][0, :] = umu10
U[2][0, :] = umu20
U[3][0, :] = ut0
U[4][0, :] = ux0
Dsav[0][0, :] = np.concatenate( (param, 0, u0), axis = None )
Dsav[1][0, :] = np.concatenate( (param, 0, umu10), axis = None )
Dsav[2][0, :] = np.concatenate( (param, 0, umu20), axis = None )
Dsav[3][0, :] = np.concatenate( (param, 0, ut0), axis = None )
Dsav[4][0, :] = np.concatenate( (param, 0, ux0), axis = None )
u = u0
umu1 = umu10
umu2 = umu20
ut = ut0
ux = ux0
unew = 0 * u
umu1new = 0 * umu1
umu2new = 0 * umu2
utnew = 0 * ut
uxnew = 0 * ux
# Generate plot data
if plot:
fig = plt.figure()
ax = fig.add_subplot(111)
upperlim = 1.4 * param[0]
ax.set_ylim( [0, upperlim] )
line1, = ax.plot(x, u0, 'r-')
# Implementation of the numerical methods.
irange = [ y + 1 for y in range(nt) ] # so irange starts at 1
for i in irange:
# Upwind nonconservative.
if method == 1:
if bc == 0 or bc == 1:
unew[0] = u[0]
umu1new[0] = umu1[0]
umu2new[0] = umu2[0]
utnew[0] = ut[0]
unew[1:] = u[1:] + dt * ( -1/dx * u[1:] * ( u[1:] - u[:-1] ) + g[1:] )
# Compute parameter derivatives
umu1new[1:] = umu1[1:] - dt * ( umu1[1:] * ( u[1:]
- u[:-1]) / dx + u[1:] * ( umu1[1:] - umu1[:-1] ) / dx )
umu2new[1:] = umu2[1:] + dt * ( x[1:] * g[1:] - umu2[1:] * ( u[1:]
- u[:-1] ) / dx - u[1:] * ( umu2[1:] - umu2[:-1] ) / dx )
utnew[1:] = -1/dx * u[1:] * ( u[1:] - u[:-1] ) + g[1:]
# Upwind conservative.
if method == 2:
if bc == 0 or bc == 1:
unew[0] = u[0]
umu1new[0] = umu1[0]
umu2new[0] = umu2[0]
utnew[0] = ut[0]
uxnew[0] = ux[0]
utnew[1:] = -1/dx * ( f(u[1:]) - f(u[:-1]) ) + g[1:]
unew[1:] = u[1:] + dt * utnew[1:]
#unew[1:] = u[1:] + dt * ( -1/dx * ( f(u[1:]) - f(u[:-1]) ) + g[1:] )
# Solve for parameter derivatives
umu1new[1:] = umu1[1:] - dt * (1/dx) * ( umu1[1:] * u[1:]
- u[:-1] * umu1[:-1] )
umu2new[1:] = umu2[1:] + dt * x[1:] * g[1:] - dt * (1/dx) * ( umu2[1:]
* u[1:] - u[:-1] * umu2[:-1] )
uxnew[1:-1] = (u[2:] - u[:-2]) / (2*dx)
uxnew[-1] = (u[-1] - u[-2]) / dx
# Godunov
if method == 3:
if bc == 0 or bc == 1:
unew[0] = u[0]
umu1new[0] = umu1[0]
umu2new[0] = umu2[0]
utnew[0] = ut[0]
unew[1:-1] = u[1:-1] + dt * ( -1/dx * ( nf(u[1:-1], u[2:])
- nf(u[:-2], u[1:-1]) ) + g[1:-1] )
if ( bc == 0 or bc == 2 ):
unew[-1] = u[-1]
elif ( bc == 1 ):
unew[-1] = u[-1] + dt * ( -1/dx * ( nf(u[-1], u[-1])
- nf(u[-2], u[-1]) ) + g[-1] )
# assume neumann at the rightend
# Solve for parameter derivatives (same as method 2)
umu1new[1:] = umu1[1:] - dt * (1/dx) * ( umu1[1:] * u[1:]
- u[:-1] * umu1[:-1] )
#
umu2new[1:] = umu2[1:] + dt * x[1:] * g[1:] - dt * (1/dx) * ( umu2[1:]
* u[1:] - u[:-1] * umu2[:-1] )
utnew[1:] = -1/dx * ( f(u[1:]) - f(u[:-1]) ) + g[1:]
# Save the latest result.
u = unew
umu1 = umu1new
umu2 = umu2new
ut = utnew
ux = uxnew
U[0][i, :] = u
U[1][i, :] = umu1
U[2][i, :] = umu2
U[3][i, :] = ut
U[4][i, :] = ux
Dsav[0][i, :] = np.concatenate( (param, dt*i, u), axis = None )
Dsav[1][i, :] = np.concatenate( (param, dt*i, umu1), axis = None )
Dsav[2][i, :] = np.concatenate( (param, dt*i, umu2), axis = None )
Dsav[3][i, :] = np.concatenate( (param, dt*i, ut), axis = None )
Dsav[4][i, :] = np.concatenate( (param, dt*i, ux), axis = None )
# Plot the profile curve.
if plot:
line1.set_ydata(u)
fig.canvas.draw()
fig.canvas.flush_events()
plt.pause(0.001)
# Generate quantity of interest Q2 (total kinetic energy) and its derivatives
# All trapezoidal Riemann sums in space and left sums in time
tempLeft = np.sum( U[0][:, :-1]**2, 1 ) * dx # left row sum
tempRight = np.sum( U[0][:, 1:]**2, 1 ) * dx # right row sum
Q2 = 0.25 * np.sum( tempLeft[:-1] + tempRight[:-1] ) * dt # left time sum
dQ2dT = 0.25 * np.sum( U[0][-1, :-1]**2 + U[0][-1, 1:]**2 ) * dx
# derivative in the final time
tempLeft = np.sum( U[0][:, :-1] * U[1][:, :-1], 1 ) * dx
tempRight = np.sum( U[0][:, 1:] * U[1][:, 1:], 1 ) * dx
dQ2dmu1 = 0.5 * np.sum( tempLeft[:-1] + tempRight[:-1] ) * dt
# derivative in mu1
tempLeft = np.sum( U[0][:, :-1] * U[2][:, :-1], 1 ) * dx
tempRight = np.sum( U[0][:, 1:] * U[2][:, 1:], 1 ) * dx
dQ2dmu2 = 0.5 * np.sum( tempLeft[:-1] + tempRight[:-1] ) * dt
# derivative in mu2
gradQ2 = np.array( [dQ2dT, dQ2dmu1, dQ2dmu2] )
# Generate quantity of interest Q3alt (integrated kinetic energy density
# at right endpoint) and its derivatives
Q3alt = 0.5 * | np.sum( U[0][:-1, -1]**2 ) | numpy.sum |
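# Toy check (numbers made up, not from the solver) of the Q2 quadrature used above:
# a trapezoid rule in x combined with a left-endpoint rule in t, written two ways.
import numpy as np
dx, dt = 0.5, 0.1
W = np.array([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]])   # W[time, space]
left = np.sum(W[:, :-1]**2, 1) * dx
right = np.sum(W[:, 1:]**2, 1) * dx
Q2 = 0.25 * np.sum(left[:-1] + right[:-1]) * dt
check = 0.25 * dt * sum((W[t, i]**2 + W[t, i + 1]**2) * dx
                        for t in range(W.shape[0] - 1)
                        for i in range(W.shape[1] - 1))
print(Q2, check)   # both 0.225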
import gzip
from abc import ABC
from pathlib import Path
from urllib import request
import numpy as np
from slick_dnn.data import Dataset
class MNISTDataSet(Dataset, ABC):
@staticmethod
def _load_mnist(images_path: str, labels_path: str, flatten_input, one_hot_output):
with open(images_path, 'rb') as f:
new_shape = (-1, 28 * 28) if flatten_input else (-1, 1, 28, 28)
data = np.frombuffer(f.read(), np.uint8, offset=16).reshape(new_shape)
with open(labels_path, 'rb') as f:
labels = np.frombuffer(f.read(), np.uint8, offset=8)
if one_hot_output:
b = np.zeros((labels.size, 10))
b[ | np.arange(labels.size) | numpy.arange |
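# Standalone illustration (not tied to the loader above): one-hot encoding a label vector
# with fancy indexing, which is what the truncated line is building up to.
import numpy as np
labels = np.array([3, 0, 4])
b = np.zeros((labels.size, 10))
b[np.arange(labels.size), labels] = 1
print(b[0])   # -> 1.0 in column 3, zeros elsewhere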
import numpy as np
class AutoDiff():
"""Class which performs forward automatic differentiation
ATTRIBUTES
==========
val: the value of the object, can be scalar of vector
der: the derivative of the object, can be a scalar, vector or array
Optional
variables: number of variables used in a multivariable function
position : position of this variable in the function
EXAMPLES
========
>>>x = AutoDiff(4)
>>>x.val
4
>>>x.der
1
>>>f = x**2 +2x
>>>f.der
10
>>> x1 = AutoDiff(2,1,2,0)
>>> x2 = AutoDiff(4,1,2,1)
>>> f = x1**2 +2*x2
[4. 2.]
>>> x1 = AutoDiff(2,[1,0])
>>> x2 = AutoDiff(4,[0,1])
>>> f = x1**2 +2*x2
[4. 2.]
"""
def __init__(self, value, deriv=1.0, variables = 1, position = 0):
if isinstance(value, list):
self.val = np.array([value]).T
self.der = np.ones((len(self.val),1))*deriv
else:
self.val = value
self.der = deriv
if isinstance(deriv, list):
try:
self.der = np.zeros((len(self.val),len(deriv)))
self.der[ : ] = deriv
except TypeError:
self.der = np.array(deriv)
if variables >1:
try:
self.der = np.zeros((len(self.val),variables))
self.der[ : , position] = deriv
except TypeError:
self.der = np.zeros(variables)
self.der[position] = deriv
def __neg__(self):
return AutoDiff(-self.val, -self.der)
def __add__(self, other):
try:
return AutoDiff(self.val+other.val, self.der+other.der)
except AttributeError:
return AutoDiff(self.val+other, self.der)
def __radd__(self, other):
return self.__add__(other)
def __mul__(self, other):
try:
return AutoDiff(self.val*other.val, self.der*other.val + self.val*other.der)
except AttributeError:
return AutoDiff(self.val*other, self.der*other)
def __rmul__(self, other):
return self.__mul__(other)
def __sub__(self, other):
try:
return AutoDiff(self.val-other.val, self.der-other.der)
except AttributeError:
return AutoDiff(self.val-other, self.der)
def __rsub__(self, other):
try:
return AutoDiff(other.val-self.val, other.der-self.der)
except AttributeError:
return AutoDiff(other-self.val, -self.der)
def __truediv__(self, other):
try:
return AutoDiff(self.val/other.val, (self.der*other.val - self.val*other.der)/(other.val**2))
except AttributeError:
return AutoDiff(self.val/other, self.der/other)
def __rtruediv__(self, other):
try:
return AutoDiff(other.val/self.val, (self.val*other.der- self.der*other.val)/(self.val**2))
except AttributeError:
return AutoDiff(other/self.val, -self.der*other/(self.val**2))
def __pow__(self, other):
try:
if isinstance(self.val, (np.ndarray, np.generic)):
self.val = self.val.astype(float)
return AutoDiff(self.val**other.val, other.val*(self.val**(other.val-1))*self.der+np.log(np.abs(self.val))*(self.val**other.val)*other.der)
except AttributeError:
if isinstance(self.val, (np.ndarray, np.generic)):
self.val = self.val.astype(float)
return AutoDiff(self.val**other, other*(self.val**(other-1))*self.der)
def __rpow__(self, other):
try:
return AutoDiff(other.val**self.val, self.val*(other.val**(self.val-1))*other.der+np.log(np.abs(other.val))*(other.val**self.val)*self.der)
except AttributeError:
return AutoDiff(other**self.val, np.log(np.abs(other))*(other**self.val)*self.der)
def __str__(self):
return 'value: {}, derivative: {}'.format(self.val,self.der)
def __eq__ (self, other):
try:
return ((self.val == other.val) and (self.der == other.der))
except AttributeError:
return False
def __ne__(self, other):
try:
return ((self.val != other.val) or (self.der != other.der))
except AttributeError:
return True
    def reverse_mode(self):
        # NOTE: this method expects `grad_value` and `children` attributes that would be
        # maintained by a reverse-mode tape; they are not initialised anywhere in this
        # forward-mode class, so callers must attach them before using it.
        # recurse only if the value is not yet cached
        if self.grad_value is None:
            # calculate derivative using chain rule
            self.grad_value = sum(weight * var.reverse_mode() for weight, var in self.children)
        return self.grad_value
def sin(x):
try:
return AutoDiff(np.sin(x.val), x.der*np.cos(x.val))
except AttributeError:
return np.sin(x)
def cos(x):
try:
return AutoDiff(np.cos(x.val), x.der*(-np.sin(x.val)))
except AttributeError:
return np.cos(x)
def tan(x):
try:
return AutoDiff(np.tan(x.val), x.der*(1/np.cos(x.val)**2))
except AttributeError:
return np.tan(x)
def arcsin(x):
try:
return AutoDiff(np.arcsin(x.val), x.der/(np.sqrt(1-x.val**2)))
except AttributeError:
return np.arcsin(x)
def arccos(x):
try:
return AutoDiff(np.arccos(x.val), -x.der/(np.sqrt(1-x.val**2)))
except AttributeError:
return np.arccos(x)
def arctan(x):
try:
return AutoDiff(np.arctan(x.val), x.der/((1+x.val**2)))
except AttributeError:
return np.arctan(x)
def exp(x):
try:
return AutoDiff(np.exp(x.val), x.der*np.exp(x.val))
except AttributeError:
return np.exp(x)
def sinh(x):
try:
return AutoDiff(np.sinh(x.val), x.der* | np.cosh(x.val) | numpy.cosh |
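# Usage sketch mirroring the EXAMPLES block in the AutoDiff docstring above:
# the forward-mode derivative of f(x) = x**2 + 2x at x = 4 is 2*4 + 2 = 10.
x = AutoDiff(4)
f = x**2 + 2*x
print(f.val, f.der)   # -> 24 10.0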
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function
import copy
import numpy as np
import healpy as hp
from scipy.interpolate import RegularGridInterpolator
from scipy.ndimage.interpolation import map_coordinates
from astropy.io import fits
from astropy.wcs import WCS
from astropy.table import Table
from astropy.coordinates import SkyCoord
from astropy.coordinates import Galactic, ICRS
import gammapy
import fermipy.utils as utils
import fermipy.wcs_utils as wcs_utils
import fermipy.hpx_utils as hpx_utils
import fermipy.fits_utils as fits_utils
from fermipy.hpx_utils import HPX, HpxToWcsMapping
def coadd_maps(geom, maps, preserve_counts=True):
"""Coadd a sequence of `~gammapy.maps.Map` objects."""
# FIXME: This functionality should be built into the Map.coadd method
map_out = gammapy.maps.Map.from_geom(geom)
for m in maps:
m_tmp = m
if isinstance(m, gammapy.maps.HpxNDMap):
if m.geom.order < map_out.geom.order:
factor = map_out.geom.nside // m.geom.nside
m_tmp = m.upsample(factor, preserve_counts=preserve_counts)
map_out.coadd(m_tmp)
return map_out
def make_coadd_map(maps, proj, shape, preserve_counts=True):
if isinstance(proj, WCS):
return make_coadd_wcs(maps, proj, shape)
elif isinstance(proj, HPX):
return make_coadd_hpx(maps, proj, shape, preserve_counts=preserve_counts)
else:
raise Exception("Can't co-add map of unknown type %s" % type(proj))
def make_coadd_wcs(maps, wcs, shape):
data = np.zeros(shape)
axes = wcs_utils.wcs_to_axes(wcs, shape)
for m in maps:
c = wcs_utils.wcs_to_coords(m.wcs, m.counts.shape)
o = np.histogramdd(c.T, bins=axes[::-1], weights=np.ravel(m.counts))[0]
data += o
return Map(data, copy.deepcopy(wcs))
def make_coadd_hpx(maps, hpx, shape, preserve_counts=True):
data = np.zeros(shape)
axes = hpx_utils.hpx_to_axes(hpx, shape)
for m in maps:
if m.hpx.order != hpx.order:
m_copy = m.ud_grade(hpx.order, preserve_counts)
else:
m_copy = m
c = hpx_utils.hpx_to_coords(m_copy.hpx, m_copy.counts.shape)
o = np.histogramdd(c.T, bins=axes, weights=np.ravel(m_copy.counts))[0]
data += o
return HpxMap(data, copy.deepcopy(hpx))
def read_map_from_fits(fitsfile, extname=None):
"""
"""
proj, f, hdu = fits_utils.read_projection_from_fits(fitsfile, extname)
if isinstance(proj, WCS):
ebins = fits_utils.find_and_read_ebins(f)
m = Map(hdu.data, proj, ebins=ebins)
elif isinstance(proj, HPX):
m = HpxMap.create_from_hdu(hdu, proj.ebins)
else:
raise Exception("Did not recognize projection type %s" % type(proj))
return m
class Map_Base(object):
""" Abstract representation of a 2D or 3D counts map."""
def __init__(self, counts):
self._counts = counts
@property
def counts(self):
return self._counts
@property
def data(self):
return self._counts
@data.setter
def data(self, val):
if val.shape != self.data.shape:
raise Exception('Wrong shape.')
self._counts = val
def get_pixel_skydirs(self):
"""Get a list of sky coordinates for the centers of every pixel. """
raise NotImplementedError("MapBase.get_pixel_skydirs()")
def get_pixel_indices(self, lats, lons):
"""Return the indices in the flat array corresponding to a set of coordinates """
raise NotImplementedError("MapBase.get_pixel_indices()")
def sum_over_energy(self):
"""Reduce a counts cube to a counts map by summing over the energy planes """
raise NotImplementedError("MapBase.sum_over_energy()")
def get_map_values(self, lons, lats, ibin=None):
"""Return the map values corresponding to a set of coordinates. """
raise NotImplementedError("MapBase.get_map_values()")
def interpolate(self, lon, lat, egy=None):
"""Return the interpolated map values corresponding to a set of coordinates. """
raise NotImplementedError("MapBase.interpolate()")
class Map(Map_Base):
""" Representation of a 2D or 3D counts map using WCS. """
def __init__(self, counts, wcs, ebins=None):
"""
Parameters
----------
counts : `~numpy.ndarray`
Counts array in row-wise ordering (LON is first dimension).
"""
Map_Base.__init__(self, counts)
self._wcs = wcs
self._npix = counts.shape[::-1]
if len(self._npix) == 3:
self._xindex = 2
self._yindex = 1
elif len(self._npix) == 2:
self._xindex = 1
self._yindex = 0
else:
raise Exception('Wrong number of dimensions for Map object.')
# if len(self._npix) != 3 and len(self._npix) != 2:
# raise Exception('Wrong number of dimensions for Map object.')
self._width = np.array([np.abs(self.wcs.wcs.cdelt[0]) * self.npix[0],
np.abs(self.wcs.wcs.cdelt[1]) * self.npix[1]])
self._pix_center = np.array([(self.npix[0] - 1.0) / 2.,
(self.npix[1] - 1.0) / 2.])
self._pix_size = np.array([np.abs(self.wcs.wcs.cdelt[0]),
np.abs(self.wcs.wcs.cdelt[1])])
self._skydir = SkyCoord.from_pixel(self._pix_center[0],
self._pix_center[1],
self.wcs)
self._ebins = ebins
if ebins is not None:
self._ectr = np.exp(utils.edge_to_center(np.log(ebins)))
else:
self._ectr = None
@property
def wcs(self):
return self._wcs
@property
def npix(self):
return self._npix
@property
def skydir(self):
"""Return the sky coordinate of the image center."""
return self._skydir
@property
def width(self):
"""Return the dimensions of the image."""
return self._width
@property
def pix_size(self):
"""Return the pixel size along the two image dimensions."""
return self._pix_size
@property
def pix_center(self):
"""Return the ROI center in pixel coordinates."""
return self._pix_center
@classmethod
def create_from_hdu(cls, hdu, wcs):
return cls(hdu.data.T, wcs)
@classmethod
def create_from_fits(cls, fitsfile, **kwargs):
hdu = kwargs.get('hdu', 0)
with fits.open(fitsfile) as hdulist:
header = hdulist[hdu].header
data = hdulist[hdu].data
header = fits.Header.fromstring(header.tostring())
wcs = WCS(header)
ebins = None
if 'ENERGIES' in hdulist:
tab = Table.read(fitsfile, 'ENERGIES')
ectr = np.array(tab.columns[0])
ebins = np.exp(utils.center_to_edge(np.log(ectr)))
elif 'EBOUNDS' in hdulist:
tab = Table.read(fitsfile, 'EBOUNDS')
emin = np.array(tab['E_MIN']) / 1E3
emax = np.array(tab['E_MAX']) / 1E3
ebins = np.append(emin, emax[-1])
return cls(data, wcs, ebins)
@classmethod
def create(cls, skydir, cdelt, npix, coordsys='CEL', projection='AIT', ebins=None, differential=False):
crpix = np.array([n / 2. + 0.5 for n in npix])
if ebins is not None:
if differential:
nebins = len(ebins)
else:
nebins = len(ebins) - 1
data = np.zeros(list(npix) + [nebins]).T
naxis = 3
else:
data = np.zeros(npix).T
naxis = 2
wcs = wcs_utils.create_wcs(skydir, coordsys, projection,
cdelt, crpix, naxis=naxis, energies=ebins)
return cls(data, wcs, ebins=ebins)
def create_image_hdu(self, name=None, **kwargs):
return fits.ImageHDU(self.counts, header=self.wcs.to_header(),
name=name)
def create_primary_hdu(self):
return fits.PrimaryHDU(self.counts, header=self.wcs.to_header())
def sum_over_energy(self):
""" Reduce a 3D counts cube to a 2D counts map
"""
# Note that the array is using the opposite convention from WCS
# so we sum over axis 0 in the array, but drop axis 2 in the WCS object
return Map(np.sum(self.counts, axis=0), self.wcs.dropaxis(2))
def xypix_to_ipix(self, xypix, colwise=False):
"""Return the flattened pixel indices from an array multi-dimensional
pixel indices.
Parameters
----------
xypix : list
List of pixel indices in the order (LON,LAT,ENERGY).
colwise : bool
Use column-wise pixel indexing.
"""
return np.ravel_multi_index(xypix, self.npix,
order='F' if colwise else 'C',
mode='raise')
def ipix_to_xypix(self, ipix, colwise=False):
"""Return array multi-dimensional pixel indices from flattened index.
Parameters
----------
colwise : bool
Use column-wise pixel indexing.
"""
return np.unravel_index(ipix, self.npix,
order='F' if colwise else 'C')
def ipix_swap_axes(self, ipix, colwise=False):
""" Return the transposed pixel index from the pixel xy coordinates
if colwise is True (False) this assumes the original index was
in column wise scheme
"""
xy = self.ipix_to_xypix(ipix, colwise)
return self.xypix_to_ipix(xy, not colwise)
def get_pixel_skydirs(self):
"""Get a list of sky coordinates for the centers of every pixel.
"""
xpix = np.linspace(0, self.npix[0] - 1., self.npix[0])
ypix = np.linspace(0, self.npix[1] - 1., self.npix[1])
xypix = np.meshgrid(xpix, ypix, indexing='ij')
return SkyCoord.from_pixel(np.ravel(xypix[0]),
np.ravel(xypix[1]), self.wcs)
def get_pixel_indices(self, lons, lats, ibin=None):
"""Return the indices in the flat array corresponding to a set of coordinates
Parameters
----------
lons : array-like
'Longitudes' (RA or GLON)
lats : array-like
'Latitidues' (DEC or GLAT)
ibin : int or array-like
Extract data only for a given energy bin. None -> extract data for all energy bins.
Returns
----------
pixcrd : list
Pixel indices along each dimension of the map.
"""
lons = np.array(lons, ndmin=1)
lats = np.array(lats, ndmin=1)
if len(lats) != len(lons):
raise RuntimeError('Map.get_pixel_indices, input lengths '
'do not match %i %i' % (len(lons), len(lats)))
if len(self._npix) == 2:
pix_x, pix_y = self._wcs.wcs_world2pix(lons, lats, 0)
pixcrd = [np.floor(pix_x).astype(int), np.floor(pix_y).astype(int)]
elif len(self._npix) == 3:
all_lons = np.expand_dims(lons, -1)
all_lats = np.expand_dims(lats, -1)
if ibin is None:
all_bins = (np.expand_dims(
np.arange(self.npix[2]), -1) * | np.ones(lons.shape) | numpy.ones |
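# Standalone sketch: the flat-index helpers above (xypix_to_ipix / ipix_to_xypix) are thin
# wrappers around numpy's ravel_multi_index / unravel_index; column-wise indexing simply
# switches the memory order from 'C' to 'F'. The pixel counts below are made up.
import numpy as np
npix = (10, 20)
xy = (3, 7)
ipix_row = np.ravel_multi_index(xy, npix, order='C')
ipix_col = np.ravel_multi_index(xy, npix, order='F')
print(ipix_row, np.unravel_index(ipix_row, npix, order='C'))   # 67 (3, 7)
print(ipix_col, np.unravel_index(ipix_col, npix, order='F'))   # 73 (3, 7)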
"""
Homework4.
Replace 'pass' by your implementation.
"""
import numpy as np
import helper
# Insert your package here
from sympy import *
from scipy.ndimage.filters import gaussian_filter
from scipy.optimize import leastsq, minimize
'''
Q2.1: Eight Point Algorithm
Input: pts1, Nx2 Matrix
pts2, Nx2 Matrix
M, a scalar parameter computed as max (imwidth, imheight)
Output: F, the fundamental matrix
'''
def eightpoint(pts1, pts2, M):
# Replace pass by your implementation
T = np.eye(3) / M
T[2, 2] = 1;
pts1 = pts1.astype('float')/M
pts2 = pts2.astype('float')/M
A = np.vstack([
pts1[:, 0]*pts2[:, 0],pts1[:, 0]*pts2[:, 1], pts1[:, 0],
pts1[:, 1]*pts2[:, 0],pts1[:, 1]*pts2[:, 1], pts1[:, 1],
pts2[:, 0],pts2[:, 1], np.ones(pts1.shape[0])
]).T
[U, S, V] = np.linalg.svd(A)
F = np.reshape(V[-1,:], (3,3))
F = helper.refineF(F, pts1, pts2)
F = T.T @ F @ T
return F
'''
Q2.2: Seven Point Algorithm
Input: pts1, Nx2 Matrix
pts2, Nx2 Matrix
M, a scalar parameter computed as max (imwidth, imheight)
Output: Farray, a list of estimated fundamental matrix.
'''
def sevenpoint(pts1, pts2, M):
T = np.eye(3) / M
T[2, 2] = 1;
pts1 = pts1.astype('float')/M
pts2 = pts2.astype('float')/M
Fs = []
A = np.vstack([
pts1[:, 0]*pts2[:, 0],pts1[:, 0]*pts2[:, 1], pts1[:, 0],
pts1[:, 1]*pts2[:, 0],pts1[:, 1]*pts2[:, 1], pts1[:, 1],
pts2[:, 0],pts2[:, 1], | np.ones(pts1.shape[0]) | numpy.ones |
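# Standalone sketch of the coordinate normalization shared by eightpoint and sevenpoint:
# points are scaled by 1/M before building A, and the fitted F is mapped back to pixel
# coordinates with F_pixels = T.T @ F_norm @ T. The image size M below is made up.
import numpy as np
M = 640.0
T = np.eye(3) / M
T[2, 2] = 1.0
p = np.array([320.0, 240.0, 1.0])   # homogeneous pixel coordinate
print(T @ p)                        # -> [0.5, 0.375, 1.0], the normalized point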
#!/usr/bin/env python3
import numpy as np
import nabla
from nabla import grad, Dual, minimise
def close(x, y, eps=1e-12):
return abs(x-y)<eps
def dualclose(x, y, eps=1e-12):
isclose = close(x.real, y.real, eps)
for i in range(x.nvars):
isclose = isclose and close(x.dual[i], y.dual[i], eps)
return isclose
def test_dual():
x = Dual(2,3)
y = Dual(4,5)
assert x<y and y>x and x<=y and y>=x
z = x + y
assert z.real==6 and z.dual[0]==8
z = x - y
assert z.real==-2 and z.dual[0]==-2
z = x * y
assert z.real==8 and z.dual[0]==22
z = x / y
assert z.real==0.5 and z.dual[0]==(3*4 - 2*5)/4**2
x = Dual(2,3)
y = 4
assert x<y and y>x and x<=y and y>=x
z = x + y
assert z.real==6 and z.dual[0]==3
z = x - y
assert z.real==-2 and z.dual[0]==3
z = x * y
assert z.real==8 and z.dual[0]==12
z = x / y
assert z.real==0.5 and z.dual[0]==(3*4 - 2*0)/4**2
x = 2
y = Dual(4,5)
assert x<y and y>x and x<=y and y>=x
z = x + y
assert z.real==6 and z.dual[0]==5
z = x - y
assert z.real==-2 and z.dual[0]==-5
z = x * y
assert z.real==8 and z.dual[0]==10
z = x / y
assert z.real==0.5 and z.dual[0]==(0*4 - 2*5)/4**2
sqrty = np.sqrt(y)
ytohalf = y ** 0.5
assert close(sqrty.real, ytohalf.real) and close(sqrty.dual[0], ytohalf.dual[0])
z = 2**y
zalt = Dual(2)**y
assert close(z.real, zalt.real) and close(z.dual[0], zalt.dual[0])
x = Dual(2,3)
y = Dual(4,5)
w = x*y
z = nabla.dot([x], [y])
assert dualclose(z, w)
z = nabla.dot(np.array([x]), np.array([y]))
assert dualclose(z, w)
z = nabla.dot(np.array([x, y]), np.array([x, x]))
assert dualclose(z, x*x + y*x)
z = nabla.dot(np.array([x, 3]), np.array([x, x]))
assert dualclose(z, x*x + 3*x)
def test_dual_multivar():
x = Dual(2, [3, 1])
y = Dual(4, [5, 2])
assert x<y and y>x and x<=y and y>=x
z = x + y
assert z.real==6 and z.dual[0]==8 and z.dual[1]==3
z = x - y
assert z.real==-2 and z.dual[0]==-2 and z.dual[1]==-1
z = x * y
assert z.real==8 and z.dual[0]==22 and z.dual[1]==8
z = x / y
assert z.real==0.5 and z.dual[0]==(3*4 - 2*5)/4**2 and z.dual[1]==(1*4 - 2*2)/4**2
x = Dual(2, [3, 1])
y = 4
assert x<y and y>x and x<=y and y>=x
z = x + y
assert z.real==6 and z.dual[0]==3 and z.dual[1]==1
z = x - y
assert z.real==-2 and z.dual[0]==3 and z.dual[1]==1
z = x * y
assert z.real==8 and z.dual[0]==12 and z.dual[1]==4
z = x / y
assert z.real==0.5 and z.dual[0]==(3*4 - 2*0)/4**2 and z.dual[1]==(1*4 - 2*0)/4**2
x = 2
y = Dual(4, [5, 2])
assert x<y and y>x and x<=y and y>=x
z = x + y
assert z.real==6 and z.dual[0]==5 and z.dual[1]==2
z = x - y
assert z.real==-2 and z.dual[0]==-5 and z.dual[1]==-2
z = x * y
assert z.real==8 and z.dual[0]==10 and z.dual[1]==4
z = x / y
assert z.real==0.5 and z.dual[0]==(0*4 - 2*5)/4**2 and z.dual[1]==(0*4 - 2*2)/4**2
def test_gradsimple():
@grad
def sq(x):
return x*x
@grad
def cupow(x):
return x**3
@grad
def plusthree(x):
return x + 3
z = sq(3)
assert z.real==9 and z.dual[0]==6
z = cupow(4)
assert close(z.real, 64) and close(z.dual[0], 48)
z = plusthree(3)
assert z.real==6 and z.dual[0]==1
def f(x):
return x*x
w = grad(0)(f)(np.array([1,2,3]))
assert dualclose(w[0], Dual(1, [2, 0, 0]))
assert dualclose(w[1], Dual(4, [0, 4, 0]))
assert dualclose(w[2], Dual(9, [0, 0, 6]))
w = grad(f)(np.array([1,2,3]))
assert dualclose(w[0], Dual(1, [2, 0, 0]))
assert dualclose(w[1], Dual(4, [0, 4, 0]))
assert dualclose(w[2], Dual(9, [0, 0, 6]))
# Element-wide mult of np arrays
A = np.array([[1,2], [3,4]])
w = grad(f)(A)
assert dualclose(w[0,0], Dual(1, [2, 0, 0, 0]))
assert dualclose(w[0,1], Dual(4, [0, 4, 0, 0]))
assert dualclose(w[1,0], Dual(9, [0, 0, 6, 0]))
assert dualclose(w[1,1], Dual(16, [0, 0, 0, 8]))
# Reduction
def f(x):
return np.sum(x*x)
w = grad(f)(np.array([1,2,3]))
assert dualclose(w, Dual(14, [2, 4, 6]))
w = grad(0)(f)(np.array([1,2,3]))
assert dualclose(w, Dual(14, [2, 4, 6]))
# kwargs
z = sq(x=3)
assert z.real==9 and z.dual[0]==6
# non-numeric args
def fn(x, y):
print(y)
return 2*x**3
fgrad = nabla.grad()(fn)(2, y="non-numeric")
assert close(fgrad.real, 16) and fgrad.nvars==1 and close(fgrad.dual[0], 24)
fgrad = nabla.grad()(fn)(y="non-numeric", x=2)
assert close(fgrad.real, 16) and fgrad.nvars==1 and close(fgrad.dual[0], 24)
# Transcendental functions
x = Dual(5,1)
z = np.sin(x)
assert close(z.real, | np.sin(5) | numpy.sin |
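# Sketch reusing the Dual class exercised above (assumes np.sin dispatches to the Dual
# implementation, as the preceding assertion does): forward-mode chain rule for
# f(x) = sin(x*x) at x = 5, whose derivative is 2*x*cos(x*x) = 10*cos(25).
x = Dual(5, 1)
z = np.sin(x * x)
assert close(z.real, np.sin(25)) and close(z.dual[0], 10 * np.cos(25))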
import numpy as np
import pytest
from src.models.noise_transformation import average_true_var_real, average_true_var_imag, average_true_cov, \
average_true_noise_covariance, naive_noise_covariance
test_cases_real_variance = [
(2 - 3j, 0, 0, 0),
(0, 1, 1, np.exp(-2) * (2 * np.cosh(2) - np.cosh(1))),
(2j, 1, 1, 4 * np.exp(-2) * (np.sinh(2) - np.sinh(1)) + np.exp(-2) * (2 * np.sinh(2) - np.sinh(1))),
(-2j, 1, 1, 4 * np.exp(-2) * (np.sinh(2) - np.sinh(1)) + np.exp(-2) * (2 * np.sinh(2) - np.sinh(1))),
]
test_cases_imag_variance = [
(4 - 3j, 0, 0, 0),
(0, 1, 1, np.exp(-2) * (2 * np.sinh(2) - np.sinh(1))),
(2j, 1, 1, 4 * np.exp(-2) * (np.cosh(2) - np.cosh(1)) + np.exp(-2) * (2 * np.cosh(2) - np.cosh(1))),
(-2j, 1, 1, 4 * np.exp(-2) * (np.cosh(2) - np.cosh(1)) + np.exp(-2) * (2 * np.cosh(2) - np.cosh(1))),
]
test_cases_covariance = [
(4 - 3j, 0, 0, 0),
(0, 1, 1, 0),
(2j, 1, 1, 0),
(-2j, 1, 1, 0),
(np.sqrt(2) * (1 + 1j), 1, 1, 0.5 * np.exp(-4) * (1 + 5 * (1 - np.exp(1)))),
]
@pytest.mark.parametrize("m,sd_magnitude,sd_phase,expected", test_cases_real_variance)
def test_variance_of_real_noise(m, sd_magnitude, sd_phase, expected):
res = average_true_var_real(m, sd_magnitude, sd_phase)
np.testing.assert_allclose(res, expected)
@pytest.mark.parametrize("m,sd_magnitude,sd_phase,expected", test_cases_imag_variance)
def test_variance_of_imag_noise(m, sd_magnitude, sd_phase, expected):
res = average_true_var_imag(m, sd_magnitude, sd_phase)
np.testing.assert_allclose(res, expected)
@pytest.mark.parametrize("m,sd_magnitude,sd_phase,expected", test_cases_covariance)
def test_covariance_of_noise(m, sd_magnitude, sd_phase, expected):
res = average_true_cov(m, sd_magnitude, sd_phase)
np.testing.assert_allclose(res, expected, rtol=0, atol=1e-10)
def test_cartesian_noise_covariance_matrix():
sd_magnitude = 1
sd_phase = 1
measurement = np.zeros(2)
res = average_true_noise_covariance(measurement, sd_magnitude, sd_phase)
expected = np.diag(
[np.exp(-2) * (2 * np.cosh(2) - np.cosh(1))] * 2 + [np.exp(-2) * (2 * np.sinh(2) - np.sinh(1))] * 2)
np.testing.assert_allclose(res.todense(), expected)
def test_naive_covariance_matrix():
sd_magnitude = 1
sd_phase = 1
measurement = | np.array([0, 1j]) | numpy.array |
'''
Utility mesh function for batch generation
Author: <NAME>
Date: Novemebr 2019
Input: root : data path
num_faces : number of sampled faces, default 8000
nb_classes : number of classes, default 8
scale : scale to unite sphere for PointNet, default False
sampling : sampling method [random, fps, or knn], default random
mode : train or val, default train
Output: Class HessigheimDataset, get items: data numpy array NxF
label numpy array Nx1
weight numpy array Nx1
Dependencies: numpy - os - h5py - open3d - scipy - sklearn - matplotlib
'''
import numpy as np
import os
import h5py
import open3d
from scipy.spatial import cKDTree
from sklearn import preprocessing
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
class HessigheimDataset():
def __init__(self, root, num_faces=8000, nb_classes=8, scale=False, sampling='random', mode='train'):
self.root = root
self.num_faces = num_faces
self.mode = mode
self.sampling = sampling
self.nb_classes = nb_classes
self.scale = scale
files = os.listdir(self.root)
self.data_all = None
self.label_all = None
for file in files:
if file.endswith('h5'):
hdf = h5py.File(self.root + '/{}'.format(file), mode='r')
face_tile = | np.array(hdf['data']) | numpy.array |
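# Sketch of unit-sphere scaling. Assumption: this is what the `scale` option in the header
# comment ("scale to unite sphere for PointNet") refers to; the snippet is not taken from
# the class body and the random point cloud is made up for illustration.
import numpy as np
pts = np.random.rand(100, 3) * 10.0
pts = pts - pts.mean(axis=0)                       # centre the cloud
pts = pts / np.max(np.linalg.norm(pts, axis=1))    # fit it inside the unit sphere
print(np.max(np.linalg.norm(pts, axis=1)))         # -> 1.0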
import numpy as np
from pyshtools.shclasses import SHCoeffs
from pyshtools.expand import spharm,SHExpandDH
from pyshtools.spectralanalysis import Curve2Mask
from pyshtools.shio import read_icgem_gfc
from sphericalpolygon import create_polygon
from ..gg.static_models import static_download
from ..gg.lovenums import lovenums
from ..gg.utils import month2int,crop_region
from ..gg.lsq import lsqm,ilsqm,wlsqm,iwlsqm
from ..gg.filter import filter_ddk,filter_gaussian,filter_gaussian_inverse
from ..gg.leakage import spectral_domain
from .class_Grid import Grid
from .class_Series import Series
class GSM(object):
'''
class GSM
- attributes:
- info -> All information about the object
- degree_order
- max_degree
- max_order
- normalization
- permanent_tide
- earth_gravity_param
- mean_equator_radius
- background_gravity
- title
- summary
- institution
- processing_level
- product_version
- time_coverage_start
- time_coverage_end
- total_month -> Months over a time interval regardless of the existence of the solutions
- total_month_counts
- solution_month
- solution_counts
- missing_month
- missing_month_counts
- missing_solution_flag -> If True, the monthly solution is missing, otherwise, the monthly solution exists
- unused_days -> Unused days for monthly solutions
- date_issued
- equi_material -> Equivalent material used to represent mass per unit area
- filter -> filter applied to monthly solutions
- shc
- shc_std
- methods:
- deaverage
- debackground
- replace_slr_c20
- filter_ddk
- filter_gaussian
- sma
- gsm
- rate
- grid
- study_area
- leakage_correction
'''
def __init__(self,info,shc,shc_std):
self.info = info
self.degree_order = info['degree_order']
self.max_degree = info['max_degree']
self.max_order = info['max_order']
self.normalization = info['normalization']
self.permanent_tide = info['permanent_tide']
self.earth_gravity_param = info['earth_gravity_param']
self.mean_equator_radius = info['mean_equator_radius']
self.background_gravity = info['background_gravity']
self.title = info['title']
self.summary = info['summary']
self.institution = info['institution']
self.processing_level = info['processing_level']
self.product_version = info['product_version']
self.time_coverage_start = info['time_coverage_start']
self.time_coverage_end = info['time_coverage_end']
self.total_month = info['total_month']
self.total_month_counts = info['total_month_counts']
self.solution_month = info['solution_month']
self.solution_counts = info['solution_counts']
self.missing_month = info['missing_month']
self.missing_month_counts = info['missing_month_counts']
self.missing_solution_flag = info['missing_solution_flag']
self.unused_days = info['unused_days']
self.date_issued = info['date_issued']
self.equi_material = info['equi_material']
self.filter = info['filter']
self.shc = shc
self.shc_std = shc_std
def __repr__(self):
return 'title = {:s}\nmax_degree = {:d}\nmax_order = {:d}\ndegree_order = {:d}\nnormalization = {:s}\ninstitution = {:s}\nprocessing_level = {:s}\nproduct_version = {:s}\ntime_coverage_start = {:s}\ntime_coverage_end = {:s}\nsolution_counts = {:d}\ntotal_month_counts = {:d}\nmissing_month_counts = {:d}'.format\
(self.title,self.max_degree,self.max_order,self.degree_order,self.normalization,self.institution,self.processing_level,self.product_version,self.time_coverage_start,self.time_coverage_end,self.solution_counts,self.total_month_counts,self.missing_month_counts)
def deaverage(self):
'''
Deaverage the GSM solutions of the GRACE and GRACE-FO RL06 products
Usage:
xxx_gsm_d = xxx_gsm.deaverage()
Outputs:
xxx_gsm_d -> instance of SHC class
Examples:
>>> csr_gsm_d = csr_gsm.deaverage()
>>> print(csr_gsm_d)
'''
info = self.info.copy()
shc_deaverage = self.shc - np.average(self.shc,axis=0)
info['title'] = 'Deaveraged ' + info['title']
info['background_gravity'] = 'Average of monthly solutions'
return GSM(info,shc_deaverage,self.shc_std)
def debackground(self):
'''
Debackground the GSM solutions of the GRACE and GRACE-FO RL06 products
Usage:
xxx_gsm_d = xxx_gsm.debackground()
Outputs:
xxx_gsm_d -> instance of GSM class
Examples:
>>> csr_gsm_d = csr_gsm.debackground()
>>> print(csr_gsm_d)
'''
info = self.info.copy()
degree_order = self.degree_order
background_gravity = self.background_gravity
gravity_file = static_download(background_gravity)
if background_gravity == 'GGM05C':
cilm,gm,r0,errors = read_icgem_gfc(gravity_file ,lmax=degree_order,errors='calibrated')
elif background_gravity == 'EIGEN-6C4':
cilm,gm,r0,errors = read_icgem_gfc(gravity_file,lmax=degree_order,errors='formal')
else:
raise Exception('Currently, available background gravity models are GGM05C and EIGEN-6C4')
shc_debackground = self.shc - cilm
info['title'] = 'Debackgrounded ' + info['title']
return GSM(info,shc_debackground,self.shc_std)
def replace_slr_c20(self,slr_c20):
'''
Replace the C20 values from the GSM files of the GRACE and GRACE-FO RL06 products with the 2nd degree terms from SLR measurements.
Usage:
xxx_gsm_r = xxx_gsm.replace_slr_c20(slr_c20)
Inputs:
slr_c20 -> instance of SLR_C20 class
Outputs:
xxx_gsm_r -> instance of GSM class
Examples:
>>> csr_gsm_r = csr_gsm.replace_slr_c20(slr_c20)
>>> print(csr_gsm_r)
'''
shc,shc_std = self.shc.copy(),self.shc_std.copy()
shc[:,0,2,0] = slr_c20.c20
shc_std[:,0,2,0] = slr_c20.c20_std
info = self.info.copy()
info['title'] = info['title'] + ' with C20 replaced by the SLR measurements'
info['summary'] = info['summary'] + ' Note that the 2nd-degree terms have been replaced with the values from SLR C20.'
return GSM(info,shc,shc_std)
def filter_ddk(self,filter_type = 'DDK5'):
'''
Filt the deaveraged GSM SHC with the DDK filter, where DDK1,DDK2,...,DDK8 are avaliable.
Usage:
xxx_gsm_fddk = xxx_gsm.filt_DDK()
Parameters:
filt_type: [optional, str, default = 'DDK5'] types of DDK filter. Avaliable options are 'DDK1', 'DDK2',...,'DDK8'
Outputs:
xxx_gsm_fddk: instance of GSM class
Examples:
>>> slr_c20 = read_slr_c20(end_date='2017-06')
>>> slr_c20_deaverage = slr_c20.deaverage()
>>> csr_gsm = read_gsm('CSR',96,lmax=179,end_date='2017-06')
>>> csr_gsm_d = csr_gsm.deaverage()
>>> csr_gsm_r = csr_gsm_d.replace_slr_c20(slr_c20_deaverage)
>>> csr_gsm_fddk = csr_gsm_r.filt_ddk('DDK5')
>>> print(csr_gsm_fddk.title)
>>> print(csr_gsm_fddk.summary)
DDK5 filtered Deaveraged GRACE Geopotential Coefficients CSR RL06 with C20 replaced by the SLR measurements
Spherical harmonic coefficients representing an estimate of the mean gravity field of Earth during the specified timespan derived from GRACE mission measurements. These coefficients represent the full magnitude of land hydrology, ice, and solid Earth processes. Further, they represent atmospheric and oceanic processes not captured in the accompanying GAC product. The 0th and 1st degree terms are excluded from CSR level-2. Note that the 2nd degree terms have been replaced by the C20 values from SLR. Also note that C20 values from SLR also experienced the DDK5 filtering.
'''
filter_shc,filter_shc_std = filter_ddk(filter_type,self.shc,self.shc_std)
info = self.info.copy()
info['title'] = filter_type + ' filtered ' + info['title']
info['filter'] = filter_type
if 'with C20 replaced by the SLR measurements' in info['title']:
info['summary'] = info['summary'] + ' Also note that C20 from SLR also experienced the ' + filter_type + ' filtering.'
return GSM(info,filter_shc,filter_shc_std)
def filter_gaussian(self,r):
'''
Filtering the deaveraged GSM SHC with the Gaussian filter.
Usage: xxx_gsm_gau = xxx_gsm.filt_gaussian()
Inputs:
r -> [float] Gaussian filter radius in km
Outputs:
xxx_gsm_gau: instance of GSM class
Examples:
>>> slr_c20 = read_slr_c20(end_date='2017-06')
>>> slr_c20_deaverage = slr_c20.deaverage()
>>> csr_gsm = read_GSM('CSR',96,lmax=179,end_date='2017-06')
>>> csr_gsm_d = csr_gsm.deaverage()
>>> csr_gsm_r = csr_gsm_d.replace_slr_c20(slr_c20_deaverage)
>>> csr_gsm_fgau = csr_gsm_r.filt_gaussian(200)
>>> print(csr_gsm_fgau.title)
Gaussian filtered Deaveraged GRACE Geopotential Coefficients CSR RL06 with C20 replaced by the SLR measurements
>>> print(csr_gsm_fgau.summary)
Spherical harmonic coefficients representing an estimate of the mean gravity field of Earth during the specified timespan derived from GRACE mission measurements. These coefficients represent the full magnitude of land hydrology, ice, and solid Earth processes. Further, they represent atmospheric and oceanic processes not captured in the accompanying GAC product. The 0th and 1st degree terms are excluded from CSR level-2. Note that the 2nd degree terms have been replaced by the C20 values from SLR. Also note that C20 values from SLR also experienced the Gaussian filtering.
'''
filter_shc,filter_shc_std = filter_gaussian(r,self.shc,self.shc_std)
info = self.info.copy()
info['title'] = 'Gaussian filtered ' + info['title']
info['filter'] = 'Gaussian filter with radius of '+str(r) + ' km'
if 'with C20 replaced by the SLR measurements' in info['title']:
info['summary'] = info['summary'] + ' Also note that C20 from SLR also experienced the Gaussian filtering.'
return GSM(info,filter_shc,filter_shc_std)
def sma(self, equi_material = None):
'''
Convert Stokes coefficients (or rates) for GSM to those for Surface Mass Anomaly in Equivalent Water (or Ice, Sand) Thickness (EWT) with unit of [mm w.e.] (or [mm i.e.], [mm s.e.]) or [mm w.e./yr] (or [mm i.e./yr], [mm s.e./yr])
Usage:
xxx_sma = xxx_gsm.sma()
Parameters:
equi_material -> [optional, str, default = None] Equivalent material for Surface Mass Anomaly. Currently, only Water, Ice, and Sand are available.
Outputs:
xxx_sma: instance of SMA class
Examples:
>>> csr_gsm = read_gsm('CSR',96)
>>> gfz_gsm = read_gsm('GFZ',96)
>>> jpl_gsm = read_gsm('JPL',96)
>>> slr_c20 = read_slr_c20()
>>> comb_gsm = GSM_average([csr_gsm.deaverage(),gfz_gsm.deaverage(),jpl_gsm.deaverage()])
>>> comb_gsm_r = comb_gsm.replace_slr_c20(slr_c20.deaverage())
>>> comb_gsm_ddk5 = comb_gsm_r.filter_ddk('DDK5')
>>> sma = comb_gsm_ddk5.sma('Sand')
>>> print(sma.title)
Stokes coefficients for Surface Mass Anomaly(SMA) in Equivalent Sand Thickness(EWT) derived from the DDK5 filtered Combined Deaveraged GRACE & GRACE-FO Geopotential Coefficients CSR RL06, GFZ RL06, JPL RL06 with C20 replaced by the SLR measurements
>>> print(sma.summary)
Spherical harmonic coefficients representing an estimate of the Surface Mass Anomaly(SMA) expressed in terms of Equivalent Sand[1442kg/m3] Thickness(EWT) with unit of [mm s.e.] during the specified timespan derived from GRACE & GRACE-FO mission measurements. These coefficients represent the full magnitude of land hydrology, ice, and solid Earth processes. Further, they represent atmospheric and oceanic processes not captured in the accompanying GAC product. Note that the 2nd degree terms have been replaced by the C20 values from SLR. Also note that C20 values from SLR also experienced the DDK5 filtering.
>>> print(sma.material)
Sand
'''
if equi_material is None:
equi_material = self.equi_material
if equi_material == 'Water':
rho = 1000
elif equi_material == 'Ice':
rho = 917
elif equi_material == 'Sand':
rho = 1442
else:
raise Exception('Currently, the equivalent material for SMA can only be Water, Ice, or Sand.')
# Calculate the average density of the Earth
G = 6.67430e-11
GM = float(self.earth_gravity_param.partition('m3/s2')[0])
a = float(self.mean_equator_radius.partition('m')[0])
rho_ave = 3*GM/(4*G*np.pi*a**3)
sma_shc = np.zeros_like(self.shc)
sma_shc_std = np.zeros_like(self.shc_std)
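# Degree-dependent conversion from dimensionless geopotential (Stokes) coefficients to
# surface mass expressed as equivalent thickness of the chosen material, following
# Wahr et al. (1998): EWT_lm = a*rho_ave/(3*rho) * (2l+1)/(1+k_l) * C_lm, where k_l is
# the load Love number of degree l. The trailing factor of 1e3 converts meters to mm.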
for l in range(self.degree_order+1):
k_l =lovenums(l)
factor = a*rho_ave/(3*rho)*(2*l+1)/(1+k_l)
#factor = a*h_l[l]/(1+k_l) # for vertical displacement
sma_shc[:,:,l,:] = factor*self.shc[:,:,l,:]*1e3 # in mm
sma_shc_std[:,:,l,:] = factor*self.shc_std[:,:,l,:]*1e3
info = self.info.copy()
if 'change rate' in info['title']:
info['title'] = 'Stokes coefficients for annual change rate of Surface Mass Anomaly(SMA) in Equivalent ' + equi_material + ' Thickness(EWT) derived from the ' + info['title']
info['summary'] = info['summary'].replace('mean gravity field of Earth','Surface Mass Anomaly(SMA) expressed in terms of Equivalent ' + equi_material + '['+ str(rho)+ 'kg/m3]' + ' Thickness(EWT) with unit of [mm '+equi_material[0].lower()+'.e./yr]')
else:
info['title'] = 'Stokes coefficients for Surface Mass Anomaly(SMA) in Equivalent ' + equi_material + ' Thickness(EWT) derived from the ' + info['title']
info['summary'] = info['summary'].replace('mean gravity field of Earth','Surface Mass Anomaly(SMA) expressed in terms of Equivalent ' + equi_material + '['+ str(rho)+ 'kg/m3]' + ' Thickness(EWT) with unit of [mm '+equi_material[0].lower()+'.e.]')
info['equi_material'] = equi_material
return GSM(info,sma_shc,sma_shc_std)
def gsm(self):
'''
Convert Stokes coefficients for Surface Mass Anomaly in Equivalent Water (or Ice, Sand) Thickness (EWT) with unit of [mm w.e.] to those for GSM
Usage: xxx_gsm = xxx_sma.gsm()
Parameters:
-----------
None
Returns:
-----------
xxx_gsm: instance of GSM class
Examples:
-----------
>>> csr_gsm = read_gsm('CSR',96)
>>> gfz_gsm = read_gsm('GFZ',96)
>>> jpl_gsm = read_gsm('JPL',96)
>>> slr_c20 = read_slr_c20()
>>> comb_gsm = GSM_average([csr_gsm.deaverage(),gfz_gsm.deaverage(),jpl_gsm.deaverage()])
>>> comb_gsm_r = comb_gsm.replace_slr_c20(slr_c20.deaverage())
>>> comb_gsm_ddk5 = comb_gsm_r.filter_ddk('DDK5')
>>> sma = comb_gsm_ddk5.sma('Sand')
>>> gsm = sma.gsm()
>>> print(sma.title)
Stokes coefficients for Surface Mass Anomaly(SMA) in Equivalent Sand Thickness(EWT) derived from the DDK5 filtered Combined Deaveraged GRACE & GRACE-FO Geopotential Coefficients CSR RL06, GFZ RL06, JPL RL06 with C20 replaced by the SLR measurements
>>> print(sma.summary)
Spherical harmonic coefficients representing an estimate of the Surface Mass Anomaly(SMA) expressed in terms of Equivalent Sand[1442kg/m3] Thickness(EWT) with unit of [mm s.e.] during the specified timespan derived from GRACE & GRACE-FO mission measurements. These coefficients represent the full magnitude of land hydrology, ice, and solid Earth processes. Further, they represent atmospheric and oceanic processes not captured in the accompanying GAC product. Note that the 2nd degree terms have been replaced by the C20 values from SLR. Also note that C20 values from SLR also experienced the DDK5 filtering.
>>> print(sma.material)
Sand
>>> print(gsm.title)
DDK5 filtered Combined Deaveraged GRACE & GRACE-FO Geopotential Coefficients CSR RL06, GFZ RL06, JPL RL06 with C20 replaced by the SLR measurements
>>> print(gsm.summary)
Spherical harmonic coefficients representing an estimate of the mean gravity field of Earth during the specified timespan derived from GRACE & GRACE-FO mission measurements. These coefficients represent the full magnitude of land hydrology, ice, and solid Earth processes. Further, they represent atmospheric and oceanic processes not captured in the accompanying GAC product. Note that the 2nd degree terms have been replaced by the C20 values from SLR. Also note that C20 values from SLR also experienced the DDK5 filtering.
>>> print(comb_gsm_ddk5.SHC[100,0,30,20])
1.8369657302246403e-12
>>> print(sma.SHC[100,0,30,20])
0.9456468755977168
>>> print(gsm.SHC[100,0,30,20])
1.8369657302246403e-12
'''
equi_material = self.equi_material
if equi_material == 'Water':
rho = 1000
elif equi_material == 'Ice':
rho = 917
elif equi_material == 'Sand':
rho = 1442
else:
raise Exception('Currently, the equivalent material can only be Water, Ice, or Sand.')
# Calculate the average density of the Earth
G = 6.67430e-11
GM = float(self.earth_gravity_param.partition('m3/s2')[0])
a = float(self.mean_equator_radius.partition('m')[0])
rho_ave = 3*GM/(4*G*np.pi*a**3)
gsm_shc = np.zeros_like(self.shc)
gsm_shc_std = np.zeros_like(self.shc_std)
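# Inverse of the conversion applied in sma(): each degree is scaled by
# 3*rho/(a*rho_ave) * (1+k_l)/(2l+1) and divided by 1e3 (mm -> m), so that
# xxx_gsm.sma().gsm() recovers the original Stokes coefficients, as demonstrated by
# the round-trip example in the docstring above.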
for l in range(self.degree_order+1):
k_l =lovenums(l)
factor = 3*rho/(a*rho_ave)*(1+k_l)/(2*l+1)
gsm_shc[:,:,l,:] = factor*self.shc[:,:,l,:]/1e3
gsm_shc_std[:,:,l,:] = factor*self.shc_std[:,:,l,:]/1e3
info = self.info.copy()
if 'change rate' in info['title']:
info['title'] = info['title'].replace('Stokes coefficients for annual change rate of Surface Mass Anomaly(SMA) in Equivalent ' + equi_material + ' Thickness(EWT) derived from the ','')
info['title'] = info['title'].replace('Stokes coefficients for Surface Mass Anomaly(SMA) in Equivalent ' + equi_material + ' Thickness(EWT) derived from the ','')
info['summary'] = info['summary'].replace('Surface Mass Anomaly(SMA) expressed in terms of Equivalent ' + equi_material + '['+ str(rho)+ 'kg/m3]' + ' Thickness(EWT) with unit of [mm '+equi_material[0].lower()+'.e./yr]','mean gravity field of Earth')
else:
info['title'] = info['title'].replace('Stokes coefficients for Surface Mass Anomaly(SMA) in Equivalent ' + equi_material + ' Thickness(EWT) derived from the ','')
info['summary'] = info['summary'].replace('Surface Mass Anomaly(SMA) expressed in terms of Equivalent ' + equi_material + '['+ str(rho)+ 'kg/m3]' + ' Thickness(EWT) with unit of [mm '+equi_material[0].lower()+'.e.]','mean gravity field of Earth')
return GSM(info,gsm_shc,gsm_shc_std)
def rate(self,mode='ILSQM'):
'''
Estimate the annual change rate of Geopotential coefficients or Stokes coefficients for Surface Mass Anomaly in Equivalent Water (or Ice, Sand) Thickness (EWT) using a linear fitting method.
There are four fitting methods: 'LSQM', 'WLSQM', 'ILSQM', and 'IWLSQM'. ILSQM is the default and recommended.
Usage: xxx_sma_rate = xxx_sma.rate() or xxx_gsm_rate = xxx_gsm.rate('IWLSQM')
Parameters:
-----------
mode -> [optional, str, default = 'ILSQM'] alternatively, 'LSQM', 'WLSQM', 'ILSQM', and 'IWLSQM' are available, where
'LSQM' -- Least Square Method
'ILSQM' -- Iterative Least Square Method
'WLSQM' -- Weighted Least Square Method
'IWLSQM' -- Iterative Weighted Least Square Method
Returns:
-----------
xxx_gsm: instance of GSM class
Examples:
-----------
>>> csr_gsm = read_gsm('CSR',96)
>>> gfz_gsm = read_gsm('GFZ',96)
>>> jpl_gsm = read_gsm('JPL',96)
>>> slr_c20 = read_slr_c20()
>>> comb_gsm = GSM_average([csr_gsm.deaverage(),gfz_gsm.deaverage(),jpl_gsm.deaverage()])
>>> comb_gsm_r = comb_gsm.replace_slr_c20(slr_c20.deaverage())
>>> comb_gsm_ddk5 = comb_gsm_r.filter_ddk('DDK5')
>>> comb_gsm_ddk5_rate = comb_gsm_ddk5.rate()
>>> print(comb_gsm_ddk5_rate.title)
'Annual change rate of DDK5 filtered Combined Deaveraged GRACE & GRACE-FO Geopotential Coefficients CSR RL06, GFZ RL06, JPL RL06 with C20 replaced by the SLR measurements'
>>> print(comb_gsm_ddk5_rate.summary)
'Spherical harmonic coefficients representing an estimate of annual change rate of the mean gravity field of Earth during the specified timespan derived from GRACE & GRACE-FO mission measurements. These coefficients represent the full magnitude of land hydrology, ice, and solid Earth processes. Further, they represent atmospheric and oceanic processes not captured in the accompanying GAC product. Note that the 2nd degree terms have been replaced by the C20 values from SLR. Also note that C20 values from SLR also experienced the DDK5 filtering.'
>>> sma_rate = comb_gsm_ddk5_rate.sma()
>>> print(sma_rate.title)
'Stokes coefficients for annual change rate of Surface Mass Anomaly(SMA) in Equivalent Water Thickness(EWT) derived from the Annual change rate of DDK5 filtered Combined Deaveraged GRACE & GRACE-FO Geopotential Coefficients CSR RL06, GFZ RL06, JPL RL06 with C20 replaced by the SLR measurements'
>>> print(sma_rate.summary)
'Spherical harmonic coefficients representing an estimate of annual change rate of the Surface Mass Anomaly(SMA) expressed in terms of Equivalent Water[1000kg/m3] Thickness(EWT) with unit of [mm w.e./yr] during the specified timespan derived from GRACE & GRACE-FO mission measurements. These coefficients represent the full magnitude of land hydrology, ice, and solid Earth processes. Further, they represent atmospheric and oceanic processes not captured in the accompanying GAC product. Note that the 2nd degree terms have been replaced by the C20 values from SLR. Also note that C20 values from SLR also experienced the DDK5 filtering.'
'''
shc,shc_std = self.shc,self.shc_std
month = month2int(self.solution_month)
shc_rate,shc_rate_std = [np.zeros_like(shc[0]) for k in range(2)]
degree = order = self.degree_order
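# For every coefficient (C/S, l, m) a straight line is fitted to its monthly time
# series with the selected least-squares variant; the slope and its standard deviation
# become the rate coefficients. The rate is per unit of the time axis returned by
# month2int (presumably fractional years, given the 'annual change rate' labelling).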
for i in range(2):
for l in range(2,degree+1): # start with 1 if consider the motion of mass center
for m in range(order+1):
if (i==1 and m==0) or m > l: continue # skip sine terms with m=0 and invalid orders m>l
if mode == 'LSQM':
shc_rate[i,l,m],shc_rate_std[i,l,m],_,_ = lsqm(month,shc[:,i,l,m])
elif mode == 'WLSQM':
shc_rate[i,l,m],shc_rate_std[i,l,m],_,_ = wlsqm(month,shc[:,i,l,m],shc_std[:,i,l,m])
elif mode == 'ILSQM':
shc_rate[i,l,m],shc_rate_std[i,l,m],normal,_,_ = ilsqm(month,shc[:,i,l,m])
elif mode == 'IWLSQM':
shc_rate[i,l,m],shc_rate_std[i,l,m],normal,_,_ = iwlsqm(month,shc[:,i,l,m],shc_std[:,i,l,m])
else:
raise Exception('Currently, the least square method can only be LSQM, WLSQM, ILSQM, and IWLSQM.')
info = self.info.copy()
info['title'] = 'Annual change rate of ' + info['title']
info['summary'] = info['summary'].replace('an estimate of','an estimate of annual change rate of')
for em in ['w','i','s']:
info['summary'] = info['summary'].replace('[mm '+em+'.e.]','[mm '+em+'.e./yr]')
return GSM(info,np.array([shc_rate]),np.array([shc_rate_std]))
def grid(self,region=None):
'''
Expand spherical harmonic coefficients into a regional grid
Usage:
xxx_sma_grid = xxx_sma.grid(region)
Parameters:
region: [float array] boundary of the area as [lon_min, lon_max, lat_min, lat_max], for example, [96.0,120.0,21.0,39.0];
Returns:
an instance of GRID class
'''
# Create an empty list to contain the grid of EWT from GRACE for each month
grids = []
shcs,shcs_std = self.shc,self.shc_std
for cilm in shcs:
coeffs_class = SHCoeffs.from_array(cilm)
grids_class = coeffs_class.expand()
grids.append(grids_class.data)
grids = np.array(grids)
grids_std = np.zeros_like(grids)
lons = grids_class.lons()
lats = grids_class.lats()
if region is not None:
if 'rate' not in self.title:
print('The calculation will take a few minutes, please be patient.')
lons_region,lats_region,grids_region,grids_std_region,lons_flag,lats_flag = crop_region(lons,lats,grids,grids_std,region)
# Convert SHCs_std to grids_std
lmax = self.degree_order
k = 0
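# Propagate the formal coefficient errors to the grid: assuming uncorrelated errors,
# sigma_grid(theta,phi) = sqrt( sum_lm (Y_lm(theta,phi) * sigma_lm)^2 ), evaluated
# point by point over the cropped region below.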
for shc_std in shcs_std:
i = 0
for theta in lats_region:
j = 0
for phi in lons_region:
ylm = spharm(lmax, 90-theta, phi)
grids_std_region[k,i,j] = np.sqrt(np.sum((ylm*shc_std)**2))
j+=1
i+=1
k+=1
else:
region = 'globe'
grids_region = grids
grids_std_region = np.zeros_like(grids_region)
lons_region,lats_region = lons,lats
lons_flag = np.ones(len(lons_region),dtype=bool)
lats_flag = np.ones(len(lats_region),dtype=bool)
# Note: Since it takes a lot of time to calculate the uncertainties of the global grid data, the uncertainties are all set to zero.
info = self.info.copy()
info['title'] = 'Grids expanded from ' + info['title']
info['summary'] = info['summary'].replace('Spherical harmonic coefficients','Grids')
info['summary'] = info['summary'].replace('coefficients','grids')
info['region'] = region
return Grid(info,grids_region,grids_std_region,lons_region,lats_region,lons_flag,lats_flag)
def study_area(self,points):
a = float(self.mean_equator_radius.partition('m')[0])/1e3 # km
if self.equi_material == 'Water':
rho = 1000
elif self.equi_material == 'Ice':
rho = 917
elif self.equi_material == 'Sand':
rho = 1442
qs,qs_std = [],[]
north_pole = create_polygon(points).contains_points([90,0]) # Determine if the North Pole is inside the study area
mask_grid = Curve2Mask(2*(self.degree_order+1),points,north_pole,sampling=2)
mask_shc = SHExpandDH(mask_grid,sampling=2)
area = mask_shc[0,0,0]*4*np.pi*a**2 # km^2
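# With 4pi-normalized spherical harmonics, the integral of a field times the mask over
# the sphere equals 4*pi*a^2 * sum_lm (C_lm^field * C_lm^mask); the (0,0) mask
# coefficient is the area fraction, hence the expression for 'area' above. Multiplying
# the EWT integral (mm*km^2) by rho and dividing by 1e9 yields mass in Gt.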
for shc in self.shc:
q = np.sum(shc*mask_shc)*4*np.pi*a**2*rho/1e9 # Gt
qs.append(q)
for shc_std in self.shc_std:
q_std = np.sqrt(np.sum((shc_std*mask_shc)**2))*4*np.pi*a**2*rho/1e9 # Gt
qs_std.append(q_std)
qs,qs_std = np.array(qs),np.array(qs_std)
info = self.info.copy()
info['title'] = 'Integral(over the study area) of '+info['title']
return Series(info,area,qs,qs_std)
def leakage_correction(self,method,r,nodes=None,study_area=None,mode=None,ratio=None):
if method == 'filter_inverse':
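# A sketch of the idea: filter_gaussian_inverse is expected to rescale the coefficients
# by the reciprocal of the Gaussian smoothing weights so that amplitude attenuated by
# filtering is restored; the exact behaviour depends on that helper, which is defined
# elsewhere in the package.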
info = self.info.copy()
corrected_shc,corrected_shc_std = filter_gaussian_inverse(r,self.shc,self.shc_std)
info['title'] = 'Signal leakage corrected(filter inverse method) ' + info['title']
return GSM(info,corrected_shc,corrected_shc_std)
elif method == 'spectral_domain':
shcs = self.shc
if nodes.region == 'globe':
grids = self.grid()
else:
grids = self.grid(nodes.region)
info = grids.info
lmax = self.degree_order
n = (lmax+1)*2
mas,mas_std = spectral_domain(shcs,nodes,study_area,r,mode,ratio)
corrected_grids = np.zeros_like(grids.grids)
corrected_grids_std = corrected_grids.copy()
nodes_index = nodes.nodes_index
for k in range(len(nodes_index)):
i,j = nodes_index[k]
corrected_grids[:,i,j] = mas[:,k]
corrected_grids_std[:,i,j] = mas_std[:,k]
info['title'] = 'Signal leakage corrected(spectral domain method) ' + info['title']
return Grid(info,corrected_grids,corrected_grids_std,grids.lons,grids.lats,grids.lons_flag,grids.lats_flag)
else:
raise Exception("Only 'filter_inverse' and 'spectral_domain' methods are available")
def __add__(self,other):
info = self.info.copy()
#info['title'] =
#info['summary'] =
self_shc,self_shc_std = self.shc,self.shc_std
other_shc,other_shc_std = other.shc,other.shc_std
if 'rate' in self.title and 'rate' in other.title:
add_shc = self_shc + other_shc
add_shc_std = np.sqrt(self_shc_std**2 + other_shc_std**2)
elif ('rate' in self.title and 'rate' not in other.title) or ('rate' not in self.title and 'rate' in other.title):
raise Exception('Addition cannot be performed between a rate object and a series object')
else:
self_existing_solution_flag = ~self.missing_solution_flag
other_existing_solution_flag = ~other.missing_solution_flag
if (self_existing_solution_flag != other_existing_solution_flag).any():
existing_solution_flag = self_existing_solution_flag & other_existing_solution_flag
n = len(existing_solution_flag)
assumed_self_shc = np.zeros((n,)+self.shc.shape[1:])
assumed_other_shc = assumed_self_shc.copy()
assumed_self_shc[self_existing_solution_flag] = self.shc
assumed_other_shc[other_existing_solution_flag] = other.shc
self_shc = assumed_self_shc[existing_solution_flag]
other_shc = assumed_other_shc[existing_solution_flag]
assumed_self_shc_std = np.zeros((n,)+self.shc_std.shape[1:])
assumed_other_shc_std = assumed_self_shc_std.copy()
assumed_self_shc_std[self_existing_solution_flag] = self.shc_std
assumed_other_shc_std[other_existing_solution_flag] = other.shc_std
self_shc_std = assumed_self_shc_std[existing_solution_flag]
other_shc_std = assumed_other_shc_std[existing_solution_flag]
solution_month = self.total_month[existing_solution_flag]
solution_counts = len(solution_month)
missing_solution_flag = ~existing_solution_flag
missing_month = self.total_month[missing_solution_flag]
missing_month_counts = len(missing_month)
info['solution_month'] = solution_month
info['solution_counts'] = solution_counts
info['missing_month'] = missing_month
info['missing_month_counts'] = missing_month_counts
info['missing_solution_flag'] = missing_solution_flag
add_shc = self_shc + other_shc
add_shc_std = np.sqrt(self_shc_std**2 + other_shc_std**2)
return GSM(info,add_shc,add_shc_std)
def __sub__(self,other):
info = self.info.copy()
#info['title'] =
#info['summary'] =
self_shc,self_shc_std = self.shc,self.shc_std
other_shc,other_shc_std = other.shc,other.shc_std
if 'rate' in self.title and 'rate' in other.title:
sub_shc = self_shc - other_shc
sub_shc_std = np.sqrt(self_shc_std**2 + other_shc_std**2)
elif ('rate' in self.title and 'rate' not in other.title) or ('rate' not in self.title and 'rate' in other.title):
raise Exception('Subtraction cannot be performed between a rate object and a series object')
else:
self_existing_solution_flag = ~self.missing_solution_flag
other_existing_solution_flag = ~other.missing_solution_flag
if (self_existing_solution_flag != other_existing_solution_flag).any():
existing_solution_flag = self_existing_solution_flag & other_existing_solution_flag
n = len(existing_solution_flag)
assumed_self_shc = np.zeros((n,)+self.shc.shape[1:])
## @ingroup Components-Energy-Converters
# Rotor.py
#
# Created: Jun 2014, <NAME>
# Modified: Jan 2016, <NAME>
# Feb 2019, <NAME>
# Mar 2020, <NAME>
# Sep 2020, <NAME>
# Mar 2021, <NAME>
# Apr 2021, <NAME>
# Jul 2021, <NAME>
# Jul 2021, <NAME>
# Sep 2021, <NAME>
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
from SUAVE.Core import Data, Units
from SUAVE.Components.Energy.Energy_Component import Energy_Component
from SUAVE.Methods.Geometry.Three_Dimensional \
import orientation_product, orientation_transpose
from SUAVE.Methods.Aerodynamics.Common.Fidelity_Zero.Lift.compute_HFW_inflow_velocities \
import compute_HFW_inflow_velocities
# package imports
import numpy as np
import scipy as sp
# ----------------------------------------------------------------------
# Generalized Rotor Class
# ----------------------------------------------------------------------
## @ingroup Components-Energy-Converters
class Rotor(Energy_Component):
"""This is a general rotor component.
Assumptions:
None
Source:
None
"""
def __defaults__(self):
"""This sets the default values for the component to function.
Assumptions:
None
Source:
N/A
Inputs:
None
Outputs:
None
Properties Used:
None
"""
self.tag = 'rotor'
self.number_of_blades = 0.0
self.tip_radius = 0.0
self.hub_radius = 0.0
self.twist_distribution = 0.0
self.sweep_distribution = 0.0 # quarter chord offset from quarter chord of root airfoil
self.chord_distribution = 0.0
self.mid_chord_alignment = 0.0
self.thickness_to_chord = 0.0
self.blade_solidity = 0.0
self.design_power = None
self.design_thrust = None
self.airfoil_geometry = None
self.airfoil_polars = None
self.airfoil_polar_stations = None
self.radius_distribution = None
self.rotation = 1
self.azimuthal_offset_angle = 0.0
self.orientation_euler_angles = [0.,0.,0.] # This is X-direction thrust in vehicle frame
self.ducted = False
self.number_azimuthal_stations = 24
self.number_points_around_airfoil = 40
self.induced_power_factor = 1.48 # accounts for interference effects
self.profile_drag_coefficient = .03
self.use_2d_analysis = False # True if rotor is at an angle relative to freestream or nonuniform freestream
self.nonuniform_freestream = False
self.axial_velocities_2d = None # user input for additional velocity influences at the rotor
self.tangential_velocities_2d = None # user input for additional velocity influences at the rotor
self.radial_velocities_2d = None # user input for additional velocity influences at the rotor
self.Wake_VD = Data()
self.wake_method = "momentum"
self.number_rotor_rotations = 6
self.number_steps_per_rotation = 100
self.wake_settings = Data()
self.wake_settings.initial_timestep_offset = 0 # initial timestep
self.wake_settings.wake_development_time = 0.05 # total simulation time required for wake development
self.wake_settings.number_of_wake_timesteps = 30 # total number of time steps in wake development
self.start_angle = 0.0 # angle of first blade from vertical
self.inputs.y_axis_rotation = 0.
self.inputs.pitch_command = 0.
self.variable_pitch = False
def spin(self,conditions):
"""Analyzes a general rotor given geometry and operating conditions.
Assumptions:
per source
Source:
<NAME>. "Qprop Formulation", MIT AeroAstro, June 2006
http://web.mit.edu/drela/Public/web/qprop/qprop_theory.pdf
Leishman, <NAME>. Principles of helicopter aerodynamics
Cambridge university press, 2006.
Inputs:
self.inputs.omega [radian/s]
conditions.freestream.
density [kg/m^3]
dynamic_viscosity [kg/(m-s)]
speed_of_sound [m/s]
temperature [K]
conditions.frames.
body.transform_to_inertial (rotation matrix)
inertial.velocity_vector [m/s]
conditions.propulsion.
throttle [-]
Outputs:
conditions.propulsion.outputs.
number_radial_stations [-]
number_azimuthal_stations [-]
disc_radial_distribution [m]
speed_of_sound [m/s]
density [kg/m-3]
velocity [m/s]
disc_tangential_induced_velocity [m/s]
disc_axial_induced_velocity [m/s]
disc_tangential_velocity [m/s]
disc_axial_velocity [m/s]
drag_coefficient [-]
lift_coefficient [-]
omega [rad/s]
disc_circulation [-]
blade_dQ_dR [N/m]
blade_dT_dr [N]
blade_thrust_distribution [N]
disc_thrust_distribution [N]
thrust_per_blade [N]
thrust_coefficient [-]
azimuthal_distribution [rad]
disc_azimuthal_distribution [rad]
blade_dQ_dR [N]
blade_dQ_dr [Nm]
blade_torque_distribution [Nm]
disc_torque_distribution [Nm]
torque_per_blade [Nm]
torque_coefficient [-]
power [W]
power_coefficient [-]
Properties Used:
self.
number_of_blades [-]
tip_radius [m]
twist_distribution [radians]
chord_distribution [m]
orientation_euler_angles [rad, rad, rad]
"""
# Unpack rotor blade parameters
B = self.number_of_blades
R = self.tip_radius
beta_0 = self.twist_distribution
c = self.chord_distribution
sweep = self.sweep_distribution # quarter chord distance from quarter chord of root airfoil
r_1d = self.radius_distribution
tc = self.thickness_to_chord
# Unpack rotor airfoil data
a_geo = self.airfoil_geometry
a_loc = self.airfoil_polar_stations
cl_sur = self.airfoil_cl_surrogates
cd_sur = self.airfoil_cd_surrogates
# Unpack rotor inputs and conditions
omega = self.inputs.omega
Na = self.number_azimuthal_stations
nonuniform_freestream = self.nonuniform_freestream
use_2d_analysis = self.use_2d_analysis
wake_method = self.wake_method
rotation = self.rotation
pitch_c = self.inputs.pitch_command
# Check for variable pitch
if np.any(pitch_c !=0) and not self.variable_pitch:
print("Warning: pitch commanded for a fixed-pitch rotor. Changing to variable pitch rotor for weights analysis.")
self.variable_pitch = True
# Unpack freestream conditions
rho = conditions.freestream.density[:,0,None]
mu = conditions.freestream.dynamic_viscosity[:,0,None]
a = conditions.freestream.speed_of_sound[:,0,None]
T = conditions.freestream.temperature[:,0,None]
Vv = conditions.frames.inertial.velocity_vector
nu = mu/rho
rho_0 = rho
# Helpful shorthands
pi = np.pi
# Calculate total blade pitch
total_blade_pitch = beta_0 + pitch_c
# Velocity in the rotor frame
T_body2inertial = conditions.frames.body.transform_to_inertial
T_inertial2body = orientation_transpose(T_body2inertial)
V_body = orientation_product(T_inertial2body,Vv)
body2thrust = self.body_to_prop_vel()
T_body2thrust = orientation_transpose(np.ones_like(T_body2inertial[:])*body2thrust)
V_thrust = orientation_product(T_body2thrust,V_body)
# Check and correct for hover
V = V_thrust[:,0,None]
V[V==0.0] = 1E-6
# Number of radial stations and segment control points
Nr = len(c)
ctrl_pts = len(Vv)
# Non-dimensional radial distribution and differential radius
chi = r_1d/R
diff_r = np.diff(r_1d)
deltar = np.zeros(len(r_1d))
deltar[1:-1] = diff_r[0:-1]/2 + diff_r[1:]/2
deltar[0] = diff_r[0]/2
deltar[-1] = diff_r[-1]/2
# Calculating rotational parameters
omegar = np.outer(omega,r_1d)
n = omega/(2.*pi) # Rotations per second
# 2 dimensional radial distribution non dimensionalized
chi_2d = np.tile(chi[:, None],(1,Na))
chi_2d = np.repeat(chi_2d[None,:,:], ctrl_pts, axis=0)
r_dim_2d = np.tile(r_1d[:, None] ,(1,Na))
r_dim_2d = np.repeat(r_dim_2d[None,:,:], ctrl_pts, axis=0)
c_2d = np.tile(c[:, None] ,(1,Na))
c_2d = np.repeat(c_2d[None,:,:], ctrl_pts, axis=0)
# Azimuthal distribution of stations
psi = np.linspace(0,2*pi,Na+1)[:-1]
psi_2d = np.tile(np.atleast_2d(psi),(Nr,1))
psi_2d = np.repeat(psi_2d[None, :, :], ctrl_pts, axis=0)
# apply blade sweep to azimuthal position
if np.any(np.array([sweep])!=0):
use_2d_analysis = True
sweep_2d = np.repeat(sweep[:, None], (1,Na))
sweep_offset_angles = np.tan(sweep_2d/r_dim_2d)
psi_2d += sweep_offset_angles
# Starting with uniform freestream
ua = 0
ut = 0
ur = 0
# Include velocities introduced by rotor incidence angles
if (np.any(abs(V_thrust[:,1]) >1e-3) or np.any(abs(V_thrust[:,2]) >1e-3)) and use_2d_analysis:
# y-component of freestream in the propeller cartesian plane
Vy = V_thrust[:,1,None,None]
Vy = np.repeat(Vy, Nr,axis=1)
Vy = np.repeat(Vy, Na,axis=2)
# z-component of freestream in the propeller cartesian plane
Vz = V_thrust[:,2,None,None]
Vz = np.repeat(Vz, Nr,axis=1)
Vz = np.repeat(Vz, Na,axis=2)
# check for invalid rotation angle
if (rotation == 1) or (rotation == -1):
pass
else:
print("Invalid rotation direction. Setting to 1.")
rotation = 1
# compute resulting radial and tangential velocities in polar frame
utz = Vz*np.cos(psi_2d* rotation)
urz = Vz*np.sin(psi_2d* rotation)
uty = Vy*np.sin(psi_2d* rotation)
ury = Vy*np.cos(psi_2d* rotation)
ut += (utz + uty)
ur += (urz + ury)
ua += np.zeros_like(ut)
# Include external velocities introduced by user
if nonuniform_freestream:
use_2d_analysis = True
# include additional influences specified at rotor sections, shape=(ctrl_pts,Nr,Na)
ua += self.axial_velocities_2d
ut += self.tangential_velocities_2d
ur += self.radial_velocities_2d
if use_2d_analysis:
# make everything 2D with shape (ctrl_pts,Nr,Na)
size = (ctrl_pts,Nr,Na )
PSI = np.ones(size)
PSIold = np.zeros(size)
# 2-D freestream velocity and omega*r
V_2d = V_thrust[:,0,None,None]
V_2d = np.repeat(V_2d, Na,axis=2)
V_2d = np.repeat(V_2d, Nr,axis=1)
omegar = (np.repeat(np.outer(omega,r_1d)[:,:,None], Na, axis=2))
# total velocities
Ua = V_2d + ua
# 2-D blade pitch and radial distributions
if np.size(pitch_c)>1:
# control variable is the blade pitch, repeat around azimuth
beta = np.repeat(total_blade_pitch[:,:,None], Na, axis=2)
else:
beta = np.tile(total_blade_pitch[None,:,None],(ctrl_pts,1,Na ))
r = np.tile(r_1d[None,:,None], (ctrl_pts, 1, Na))
c = np.tile(c[None,:,None], (ctrl_pts, 1, Na))
deltar = np.tile(deltar[None,:,None], (ctrl_pts, 1, Na))
# 2-D atmospheric properties
a = np.tile(np.atleast_2d(a),(1,Nr))
a = np.repeat(a[:, :, None], Na, axis=2)
nu = np.tile(np.atleast_2d(nu),(1,Nr))
nu = np.repeat(nu[:, :, None], Na, axis=2)
rho = np.tile(np.atleast_2d(rho),(1,Nr))
rho = np.repeat(rho[:, :, None], Na, axis=2)
T = np.tile(np.atleast_2d(T),(1,Nr))
T = np.repeat(T[:, :, None], Na, axis=2)
else:
# total velocities
r = r_1d
Ua = np.outer((V + ua),np.ones_like(r))
beta = total_blade_pitch
# Things that will change with iteration
size = (ctrl_pts,Nr)
PSI = np.ones(size)
PSIold = np.zeros(size)
# Total velocities
Ut = omegar - ut
U = np.sqrt(Ua*Ua + Ut*Ut + ur*ur)
if wake_method == 'momentum':
# Setup a Newton iteration
diff = 1.
tol = 1e-6 # Convergence tolerance
ii = 0
# BEMT Iteration
while (diff>tol):
# compute velocities
sin_psi = np.sin(PSI)
cos_psi = np.cos(PSI)
Wa = 0.5*Ua + 0.5*U*sin_psi
Wt = 0.5*Ut + 0.5*U*cos_psi
va = Wa - Ua
vt = Ut - Wt
# compute blade airfoil forces and properties
Cl, Cdval, alpha, Ma, W = compute_airfoil_aerodynamics(beta,c,r,R,B,Wa,Wt,a,nu,a_loc,a_geo,cl_sur,cd_sur,ctrl_pts,Nr,Na,tc,use_2d_analysis)
# compute inflow velocity and tip loss factor
lamdaw, F, piece = compute_inflow_and_tip_loss(r,R,Wa,Wt,B)
# compute Newton residual on circulation
Gamma = vt*(4.*pi*r/B)*F*(1.+(4.*lamdaw*R/(pi*B*r))*(4.*lamdaw*R/(pi*B*r)))**0.5
Rsquiggly = Gamma - 0.5*W*c*Cl
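# PSI parameterizes the inflow state: the construction of Wa and Wt above places the
# induced velocity (va, vt) perpendicular to the resultant velocity W, per the Qprop
# formulation cited in the docstring. Rsquiggly is the residual between the
# momentum/tip-loss circulation and the blade-element circulation 0.5*W*c*Cl; the
# Newton step below drives it toward zero.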
# use analytical derivative to get dR_dpsi
dR_dpsi = compute_dR_dpsi(B,beta,r,R,Wt,Wa,U,Ut,Ua,cos_psi,sin_psi,piece)
# update inflow angle
dpsi = -Rsquiggly/dR_dpsi
PSI = PSI + dpsi
diff = np.max(abs(PSIold-PSI))
PSIold = PSI
# If omega = 0, do not run BEMT convergence loop
if all(omega[:,0]) == 0. :
break
# If its really not going to converge
if np.any(PSI>pi/2) and np.any(dpsi>0.0):
print("Rotor BEMT did not converge to a solution (Stall)")
break
ii+=1
if ii>10000:
print("Rotor BEMT did not converge to a solution (Iteration Limit)")
break
elif wake_method == "helical_fixed_wake":
# converge on va for a semi-prescribed wake method
ii,ii_max = 0, 50
va_diff, tol = 1, 1e-3
while va_diff > tol:
# compute axial wake-induced velocity (a byproduct of the circulation distribution which is an input to the wake geometry)
va, vt = compute_HFW_inflow_velocities(self)
# compute new blade velocities
Wa = va + Ua
Wt = Ut - vt
# Compute aerodynamic forces based on specified input airfoil or surrogate
Cl, Cdval, alpha, Ma,W = compute_airfoil_aerodynamics(beta,c,r,R,B,Wa,Wt,a,nu,a_loc,a_geo,cl_sur,cd_sur,ctrl_pts,Nr,Na,tc,use_2d_analysis)
lamdaw, F, _ = compute_inflow_and_tip_loss(r,R,Wa,Wt,B)
va_diff = np.max(abs(va - self.outputs.disc_axial_induced_velocity))
# compute HFW circulation at the blade
Gamma = 0.5*W*c*Cl
# update the axial disc velocity based on new va from HFW
self.outputs.disc_axial_induced_velocity = self.outputs.disc_axial_induced_velocity + 0.5*(va - self.outputs.disc_axial_induced_velocity)
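# Under-relaxation: move the stored disc axial induced velocity halfway toward the
# newly computed HFW value to stabilize the fixed-point iteration on the wake shape.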
ii+=1
if ii>ii_max and va_diff>tol:
print("Semi-prescribed helical wake did not converge on axial inflow used for wake shape.")
# tip loss correction for velocities, since tip loss correction is only applied to loads in prior BEMT iteration
va = F*va
vt = F*vt
lamdaw = r*(va+Ua)/(R*(Ut-vt))
# More Cd scaling from Mach from AA241ab notes for turbulent skin friction
Tw_Tinf = 1. + 1.78*(Ma*Ma)
Tp_Tinf = 1. + 0.035*(Ma*Ma) + 0.45*(Tw_Tinf-1.)
Tp = (Tp_Tinf)*T
Rp_Rinf = (Tp_Tinf**2.5)*(Tp+110.4)/(T+110.4)
Cd = ((1/Tp_Tinf)*(1/Rp_Rinf)**0.2)*Cdval
epsilon = Cd/Cl
epsilon[epsilon==np.inf] = 10.
# thrust and torque and their derivatives on the blade.
blade_T_distribution = rho*(Gamma*(Wt-epsilon*Wa))*deltar
blade_Q_distribution = rho*(Gamma*(Wa+epsilon*Wt)*r)*deltar
blade_dT_dr = rho*(Gamma*(Wt-epsilon*Wa))
blade_dQ_dr = rho*(Gamma*(Wa+epsilon*Wt)*r)
if use_2d_analysis:
blade_T_distribution_2d = blade_T_distribution
blade_Q_distribution_2d = blade_Q_distribution
blade_dT_dr_2d = blade_dT_dr
blade_dQ_dr_2d = blade_dQ_dr
blade_Gamma_2d = Gamma
alpha_2d = alpha
Va_2d = Wa
Vt_2d = Wt
Va_avg = np.average(Wa, axis=2) # averaged around the azimuth
Vt_avg = np.average(Wt, axis=2) # averaged around the azimuth
Va_ind_2d = va
Vt_ind_2d = vt
Vt_ind_avg = np.average(vt, axis=2)
Va_ind_avg = np.average(va, axis=2)
# set 1d blade loadings to be the average:
blade_T_distribution = np.mean((blade_T_distribution_2d), axis = 2)
blade_Q_distribution = np.mean((blade_Q_distribution_2d), axis = 2)
blade_dT_dr = np.mean((blade_dT_dr_2d), axis = 2)
blade_dQ_dr = np.mean((blade_dQ_dr_2d), axis = 2)
# compute the hub force / rotor drag distribution along the blade
dL_2d = 0.5*rho*c_2d*Cd*omegar**2*deltar
dD_2d = 0.5*rho*c_2d*Cl*omegar**2*deltar
rotor_drag_distribution = np.mean(dL_2d*np.sin(psi_2d) + dD_2d*np.cos(psi_2d),axis=2)
else:
Va_2d = np.repeat(Wa[ :, :, None], Na, axis=2)
Vt_2d = np.repeat(Wt[ :, :, None], Na, axis=2)
blade_T_distribution_2d = np.repeat(blade_T_distribution[:, :, None], Na, axis=2)
blade_Q_distribution_2d = np.repeat(blade_Q_distribution[:, :, None], Na, axis=2)
blade_dT_dr_2d = np.repeat(blade_dT_dr[:, :, None], Na, axis=2)
blade_dQ_dr_2d = np.repeat(blade_dQ_dr[:, :, None], Na, axis=2)
blade_Gamma_2d = np.repeat(Gamma[ :, :, None], Na, axis=2)
alpha_2d = np.repeat(alpha[ :, :, None], Na, axis=2)
Vt_avg = Wt
Va_avg = Wa
Vt_ind_avg = vt
Va_ind_avg = va
Va_ind_2d = np.repeat(va[ :, :, None], Na, axis=2)
Vt_ind_2d = np.repeat(vt[ :, :, None], Na, axis=2)
# compute the hub force / rotor drag distribution along the blade
dL = 0.5*rho*c*Cd*omegar**2*deltar
dL_2d = np.repeat(dL[:, :, None], Na, axis=2)
dD = 0.5*rho*c*Cl*omegar**2*deltar
dD_2d = np.repeat(dD[:, :, None], Na, axis=2)
rotor_drag_distribution = np.mean(dL_2d*np.sin(psi_2d) + dD_2d*np.cos(psi_2d),axis=2)
# forces
thrust = np.atleast_2d((B * np.sum(blade_T_distribution, axis = 1))).T
torque = np.atleast_2d((B * np.sum(blade_Q_distribution, axis = 1))).T
rotor_drag = np.atleast_2d((B * np.sum(rotor_drag_distribution, axis=1))).T
power = omega*torque
# calculate coefficients
D = 2*R
Cq = torque/(rho_0*(n*n)*(D*D*D*D*D))
Ct = thrust/(rho_0*(n*n)*(D*D*D*D))
Cp = power/(rho_0*(n*n*n)*(D*D*D*D*D))
Crd = rotor_drag/(rho_0*(n*n)*(D*D*D*D))
etap = V*thrust/power
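# Standard propeller coefficient definitions used above: Ct = T/(rho*n^2*D^4),
# Cq = Q/(rho*n^2*D^5), Cp = P/(rho*n^3*D^5), with n in rev/s and D the diameter;
# etap = T*V/P is the propulsive efficiency.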
# prevent things from breaking
Cq[Cq<0] = 0.
Ct[Ct<0] = 0.
Cp[Cp<0] = 0.
thrust[conditions.propulsion.throttle[:,0] <=0.0] = 0.0
power[conditions.propulsion.throttle[:,0] <=0.0] = 0.0
torque[conditions.propulsion.throttle[:,0] <=0.0] = 0.0
rotor_drag[conditions.propulsion.throttle[:,0] <=0.0] = 0.0
thrust[omega<0.0] = -thrust[omega<0.0]
thrust[omega==0.0] = 0.0
power[omega==0.0] = 0.0
torque[omega==0.0] = 0.0
rotor_drag[omega==0.0] = 0.0
Ct[omega==0.0] = 0.0
Cp[omega==0.0] = 0.0
etap[omega==0.0] = 0.0
# Make the thrust a 3D vector
thrust_prop_frame = np.zeros((ctrl_pts,3))
thrust_prop_frame[:,0] = thrust[:,0]
thrust_vector = orientation_product(orientation_transpose(T_body2thrust),thrust_prop_frame)
# Assign efficiency to network
conditions.propulsion.etap = etap
# Store data
self.azimuthal_distribution = psi
results_conditions = Data
outputs = results_conditions(
number_radial_stations = Nr,
number_azimuthal_stations = Na,
disc_radial_distribution = r_dim_2d,
speed_of_sound = conditions.freestream.speed_of_sound,
density = conditions.freestream.density,
velocity = Vv,
blade_tangential_induced_velocity = Vt_ind_avg,
blade_axial_induced_velocity = Va_ind_avg,
blade_tangential_velocity = Vt_avg,
blade_axial_velocity = Va_avg,
disc_tangential_induced_velocity = Vt_ind_2d,
disc_axial_induced_velocity = Va_ind_2d,
disc_tangential_velocity = Vt_2d,
disc_axial_velocity = Va_2d,
drag_coefficient = Cd,
lift_coefficient = Cl,
omega = omega,
disc_circulation = blade_Gamma_2d,
blade_dT_dr = blade_dT_dr,
disc_dT_dr = blade_dT_dr_2d,
blade_thrust_distribution = blade_T_distribution,
disc_thrust_distribution = blade_T_distribution_2d,
disc_effective_angle_of_attack = alpha_2d,
thrust_per_blade = thrust/B,
thrust_coefficient = Ct,
disc_azimuthal_distribution = psi_2d,
blade_dQ_dr = blade_dQ_dr,
disc_dQ_dr = blade_dQ_dr_2d,
blade_torque_distribution = blade_Q_distribution,
disc_torque_distribution = blade_Q_distribution_2d,
torque_per_blade = torque/B,
torque_coefficient = Cq,
power = power,
power_coefficient = Cp,
converged_inflow_ratio = lamdaw,
propeller_efficiency = etap,
blade_H_distribution = rotor_drag_distribution,
rotor_drag = rotor_drag,
rotor_drag_coefficient = Crd,
)
return thrust_vector, torque, power, Cp, outputs , etap
def spin_HFW(self,conditions):
"""Analyzes a general rotor given geometry and operating conditions.
Runs the blade element theory with a helical fixed-wake model for the
iterative wake analysis.
Assumptions:
Helical fixed-wake with wake skew angle
Source:
N/A
Inputs:
self.inputs.omega [radian/s]
conditions.freestream.
density [kg/m^3]
dynamic_viscosity [kg/(m-s)]
speed_of_sound [m/s]
temperature [K]
conditions.frames.
body.transform_to_inertial (rotation matrix)
inertial.velocity_vector [m/s]
conditions.propulsion.
throttle [-]
Outputs:
conditions.propulsion.outputs.
number_radial_stations [-]
number_azimuthal_stations [-]
disc_radial_distribution [m]
speed_of_sound [m/s]
density [kg/m-3]
velocity [m/s]
disc_tangential_induced_velocity [m/s]
disc_axial_induced_velocity [m/s]
disc_tangential_velocity [m/s]
disc_axial_velocity [m/s]
drag_coefficient [-]
lift_coefficient [-]
omega [rad/s]
disc_circulation [-]
blade_dQ_dR [N/m]
blade_dT_dr [N]
blade_thrust_distribution [N]
disc_thrust_distribution [N]
thrust_per_blade [N]
thrust_coefficient [-]
azimuthal_distribution [rad]
disc_azimuthal_distribution [rad]
blade_dQ_dR [N]
blade_dQ_dr [Nm]
blade_torque_distribution [Nm]
disc_torque_distribution [Nm]
torque_per_blade [Nm]
torque_coefficient [-]
power [W]
power_coefficient [-]
Properties Used:
self.
number_of_blades [-]
tip_radius [m]
twist_distribution [radians]
chord_distribution [m]
orientation_euler_angles [rad, rad, rad]
"""
#--------------------------------------------------------------------------------
# Initialize by running BEMT to get initial blade circulation
#--------------------------------------------------------------------------------
_, _, _, _, bemt_outputs , _ = self.spin(conditions)
conditions.noise.sources.propellers[self.tag] = bemt_outputs
self.outputs = bemt_outputs
omega = self.inputs.omega
#--------------------------------------------------------------------------------
# generate rotor wake vortex distribution
#--------------------------------------------------------------------------------
props = Data()
props.propeller = self
# generate wake distribution for n rotor rotation
nrots = self.number_rotor_rotations
steps_per_rot = self.number_steps_per_rotation
rpm = omega/Units.rpm
# simulation parameters for n rotor rotations
init_timestep_offset = 0.
time = 60*nrots/rpm[0][0]
number_of_wake_timesteps = steps_per_rot*nrots
self.wake_settings.init_timestep_offset = init_timestep_offset
self.wake_settings.wake_development_time = time
self.wake_settings.number_of_wake_timesteps = number_of_wake_timesteps
self.use_2d_analysis = True
# spin propeller with helical fixed-wake
self.wake_method = "helical_fixed_wake"
thrust_vector, torque, power, Cp, outputs , etap = self.spin(conditions)
return thrust_vector, torque, power, Cp, outputs , etap
def vec_to_vel(self):
"""This rotates from the propellers vehicle frame to the propellers velocity frame
Assumptions:
There are two propeller frames, the vehicle frame describing the location and the propeller velocity frame
velocity frame is X out the nose, Z towards the ground, and Y out the right wing
vehicle frame is X towards the tail, Z towards the ceiling, and Y out the right wing
Source:
N/A
Inputs:
None
Outputs:
None
Properties Used:
None
"""
rot_mat = sp.spatial.transform.Rotation.from_rotvec([0,np.pi,0]).as_matrix()
return rot_mat
def body_to_prop_vel(self):
"""This rotates from the systems body frame to the propellers velocity frame
Assumptions:
There are two propeller frames, the vehicle frame describing the location and the propeller velocity frame
velocity frame is X out the nose, Z towards the ground, and Y out the right wing
vehicle frame is X towards the tail, Z towards the ceiling, and Y out the right wing
Source:
N/A
Inputs:
None
Outputs:
None
Properties Used:
None
"""
# Go from body to vehicle frame
body_2_vehicle = sp.spatial.transform.Rotation.from_rotvec([0,np.pi,0]).as_matrix()
# Go from vehicle frame to propeller vehicle frame: rot 1 including the extra body rotation
rots = np.array(self.orientation_euler_angles) * 1.
rots[1] = rots[1] + self.inputs.y_axis_rotation
vehicle_2_prop_vec = sp.spatial.transform.Rotation.from_rotvec(rots).as_matrix()
# GO from the propeller vehicle frame to the propeller velocity frame: rot 2
prop_vec_2_prop_vel = self.vec_to_vel()
# Do all the matrix multiplies
rot1 = np.matmul(body_2_vehicle,vehicle_2_prop_vec)
# -*- coding: utf-8 -*-
# _realizeNTF_ct.py
# Module providing the realizeNTF_ct function
# Copyright 2013 <NAME>
# This file is part of python-deltasigma.
#
# python-deltasigma is a 1:1 Python replacement of Richard Schreier's
# MATLAB delta sigma toolbox (aka "delsigma"), upon which it is heavily based.
# The delta sigma toolbox is (c) 2009, <NAME>.
#
# python-deltasigma is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# LICENSE file for the licensing terms.
"""Module providing the realizeNTF_ct() function
"""
from __future__ import division, print_function
from warnings import warn
import numpy as np
import numpy.linalg as linalg
from scipy.signal import dimpulse, ss2zpk
from ._evalTFP import evalTFP
from ._impL1 import impL1
from ._padb import padb
from ._pulse import pulse
from ._utils import _get_zpk, carray, eps
def realizeNTF_ct(ntf, form='FB', tdac=(0, 1), ordering=None, bp=None,
ABCDc=None, method='LOOP'):
"""Realize an NTF with a continuous-time loop filter.
**Parameters:**
ntf : object
A noise transfer function (NTF).
form : str, optional
A string specifying the topology of the loop filter.
* 'FB': Feedback form,
* 'FF': Feedforward form
For the FB structure, the elements of ``Bc`` are calculated
so that the sampled pulse response matches the L1 impulse
response. For the FF structure, ``Cc`` is calculated.
tdac : sequence, optional
The timing for the feedback DAC(s). If ``tdac[0] >= 1``,
direct feedback terms are added to the quantizer.
Multiple timings (one or more per integrator) for the FB
topology can be specified by making tdac a list of lists,
e.g. ``tdac = [[1, 2], [1, 2], [[0.5, 1], [1, 1.5]], []]``
In this example, the first two integrators have
DACs with ``[1, 2]`` timing, the third has a pair of
DACs, one with ``[0.5, 1]`` timing and the other with
``[1, 1.5]`` timing, and there is no direct feedback
DAC to the quantizer.
ordering : sequence, optional
A vector specifying which NTF zero-pair to use in each resonator
Default is for the zero-pairs to be used in the order specified
in the NTF.
bp : sequence, optional
A vector specifying which resonator sections are bandpass.
The default (``zeros(...)``) is for all sections to be lowpass.
ABCDc : ndarray, optional
The loop filter structure, in state-space form.
If this argument is omitted, ABCDc is constructed according
to "form."
method : str, optional
The default fitting method is ``'LOOP'``, which means that
the DT and CT loop responses will be matched.
Alternatively, it is possible to set the method to ``'NTF'``,
which will result in the NTF responses to be matched.
See :ref:`discrete-time-to-continuous-time-mapping` for a
more in-depth discussion.
**Returns:**
ABCDc : ndarray
A state-space description of the CT loop filter
tdac2 : ndarray
A matrix with the DAC timings, including ones
that were automatically added.
**Example:**
Realize the NTF :math:`(1 - z^{-1})^2` with a CT system (cf with the
example at :func:`mapCtoD`).::
from deltasigma import *
ntf = ([1, 1], [0, 0], 1)
ABCDc, tdac2 = realizeNTF_ct(ntf, 'FB')
Returns:
ABCDc::
[[ 0. 0. 1. -1. ]
[ 1. 0. 0. -1.49999999]
[ 0. 1. 0. 0. ]]
tdac2::
[[-1. -1.]
[ 0. 1.]]
"""
ntf_z, ntf_p, _ = _get_zpk(ntf)
ntf_z = carray(ntf_z)
ntf_p = carray(ntf_p)
order = max(ntf_p.shape)
order2 = int(np.floor(order/2.))
odd = order - 2*order2
# compensate for limited accuracy of zero calculation
ntf_z[np.abs(ntf_z - 1) < eps**(1./(1. + order))] = 1.
method = method.upper()
if method not in ('LOOP', 'NTF'):
raise ValueError('Unimplemented matching method %s.' % method)
# check if multiple timings mode
if (type(tdac) == list or type(tdac) == tuple) and len(tdac) and \
(type(tdac[0]) == list or type(tdac[0]) == tuple):
if len(tdac) != order + 1:
msg = 'For multi-timing tdac, len(tdac) ' + \
' must be order+1.'
raise ValueError(msg)
if form != 'FB':
msg = "Currently only supporting form='FB' " + \
'for multi-timing tdac'
raise ValueError(msg)
multi_timing = True
else: # single timing
tdac = carray(tdac)
if np.prod(tdac.shape) != 2:
msg = 'For single-timing tdac, len(tdac) must be 2.'
raise ValueError(msg)
tdac.reshape((2,))
multi_timing = False
if ordering is None:
ordering = np.arange(order2)
if bp is None:
bp = np.zeros((order2,))
if not multi_timing:
# Need direct terms for every interval of memory in the DAC
n_direct = np.ceil(tdac[1]) - 1
if tdac[0] > 0 and tdac[0] < 1 and tdac[1] > 1 and tdac[1] < 2:
n_extra = n_direct - 1 # tdac pulse spans a sample point
else:
n_extra = n_direct
tdac2 = np.vstack(
(np.array((-1, -1)),
np.array(tdac).reshape((1, 2)),
0.5*np.dot(np.ones((n_extra, 1)), np.array([[-1, 1]]))
+ np.cumsum(np.ones((n_extra, 2)), 0) + (n_direct - n_extra)
))
else:
n_direct = 0
n_extra = 0
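# tdac2 collects the DAC timings actually realized: a leading (-1, -1) placeholder row,
# the user-specified pulse, and any automatically added one-sample direct-feedback
# pulses needed when the DAC pulse extends past a sampling instant (n_direct/n_extra
# above). In multi-timing mode it is assembled later from tdac itself.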
if ABCDc is None:
ABCDc = np.zeros((order + 1, order + 2))
# Stuff the A portion
if odd:
ABCDc[0, 0] = np.real(np.log(ntf_z[0]))
ABCDc[1, 0] = 1
dline = np.array([0, 1, 2])
for i in range(order2):
n = bp[i]
i1 = 2*i + odd
zi = 2*ordering[i] + odd
w = np.abs(np.angle(ntf_z[zi]))
ABCDc[i1 + dline, i1] = np.array([0, 1, n])
ABCDc[i1 + dline, i1 + 1] = np.array([-w**2, 0, 1 - n])
ABCDc[0, order] = 1
# 2006.10.02 Changed to -1 to make FF STF have +ve gain at DC
ABCDc[0, order + 1] = -1
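# The default loop filter built above chains an optional first-order section (odd
# order) with order2 two-state resonator blocks, each tuned to an NTF zero frequency
# w = |angle(ntf_z[zi])| (a double integrator when w = 0, bandpass when bp[i] = 1);
# the last two columns of ABCDc are the input and feedback-DAC columns.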
Ac = ABCDc[:order, :order]
if form == 'FB':
Cc = ABCDc[order, :order].reshape((1, -1))
if not multi_timing:
Bc = np.hstack((np.eye(order), np.zeros((order, 1))))
Dc = np.hstack((np.zeros((1, order)), np.array([[1]])))
tp = np.tile(np.array(tdac).reshape((1, 2)), (order + 1, 1))
else: #Assemble tdac2, Bc and Dc
tdac2 = np.array([[-1, -1]])
Bc = None
Dc = None
Bci = np.hstack((np.eye(order), np.zeros((order, 1))))
Dci = np.hstack((np.zeros((1, order)), np.array([[1]])))
for i in range(len(tdac)):
tdi = tdac[i]
if (type(tdi) in (tuple, list)) and len(tdi) and \
(type(tdi[0]) in (list, tuple)):
for j in range(len(tdi)):
tdj = tdi[j]
tdac2 = np.vstack((tdac2,
np.array(tdj).reshape(1,-1)))
if Bc is not None:
Bc = np.hstack((Bc, Bci[:, i].reshape((-1, 1))))
else:
Bc = Bci[:, i].reshape((-1, 1))
if Dc is not None:
Dc = np.hstack((Dc, Dci[:, i].reshape((-1, 1))))
else:
Dc = Dci[:, i].reshape((-1, 1))
elif len(tdi): # we got tdac[i] = [a, b] where a, b are scalars
tdac2 = np.vstack((tdac2,
np.array(tdi).reshape(1,-1)))
if Bc is not None:
Bc = np.hstack((Bc, Bci[:, i].reshape((-1, 1))))
else:
Bc = Bci[:, i].reshape((-1, 1))
if Dc is not None:
Dc = np.hstack((Dc, Dci[:, i].reshape((-1, 1))))
else:
Dc = Dci[:, i].reshape((-1, 1))
tp = tdac2[1:, :]
elif form == 'FF':
Cc = np.vstack((np.eye(order), np.zeros((1, order))))
Bc = np.vstack((np.array([[-1]]), np.zeros((order-1, 1))))
Dc = np.vstack((np.zeros((order, 1)), np.array([[1]])))
tp = tdac # 2008-03-24 fix from <NAME>
else:
raise ValueError('Sorry, no code for form "%s".', form)
n_imp = np.ceil(2*order + np.max(tdac2[:, 1]))
import os
import sys
import datetime
import json
import logging
import imageio
import PIL.Image
import numpy as np
import matplotlib.pyplot as plt
from plark_game import classes
from gym_plark.envs import plark_env
from gym_plark.envs.plark_env_sparse import PlarkEnvSparse
from gym_plark.envs.plark_env import PlarkEnv
from stable_baselines.common.vec_env import SubprocVecEnv
from stable_baselines.common.env_checker import check_env
from stable_baselines import DQN, PPO2, A2C, ACKTR
from stable_baselines.bench import Monitor
from stable_baselines.common.vec_env import DummyVecEnv, VecEnv
from copy import deepcopy
# PyTorch Stable Baselines
from stable_baselines3 import PPO
from stable_baselines3.common.vec_env import SubprocVecEnv as SubprocVecEnv_Torch
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
DEFAULT_FPS = 3 # Originally was 10
BASEWIDTH = 512 # Originally was 512, increase/decrease for higher/lower resolution
def model_label(modeltype,basicdate,modelplayer):
label = modeltype + "_" + str(basicdate) + "_" + modelplayer
return label
def make_new_model(model_type,policy,env, n_steps=100, tensorboard_log=None):
if model_type.lower() == 'dqn':
model = DQN(policy,env,tensorboard_log=tensorboard_log)
elif model_type.lower() == 'ppo2':
model = PPO2(policy,env,tensorboard_log=tensorboard_log)
elif model_type.lower() == 'ppo':
model = PPO(policy,env, n_steps=n_steps)
elif model_type.lower() == 'a2c':
model = A2C(policy,env,tensorboard_log=tensorboard_log)
elif model_type.lower() == 'acktr':
model = ACKTR(policy,env,tensorboard_log=tensorboard_log)
return model
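# Example usage (a sketch; the environment construction and policy name are assumptions,
# e.g. a config path of your own and the standard 'MlpPolicy'):
# env = get_envs('pelican', config_file_path, num_envs=2)
# model = make_new_model('PPO', 'MlpPolicy', env, n_steps=128)
# model.learn(total_timesteps=10000)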
def train_until(model, env, victory_threshold=0.8, victory_trials=10, max_seconds=120, testing_interval=200, tb_writer=None, tb_log_name=None):
steps = 0
max_victory_fraction = 0.0
initial_time = datetime.datetime.now()
current_time = datetime.datetime.now()
elapsed_seconds = (current_time - initial_time).total_seconds()
while elapsed_seconds < max_seconds:
logger.info("Training for {} steps".format(testing_interval))
before_learning = datetime.datetime.now()
model.learn(testing_interval)
after_learning = datetime.datetime.now()
steps = steps + testing_interval
logger.info("Learning took {:.2f} seconds".format((after_learning - before_learning).total_seconds()))
logger.info("Checking victory")
# check_victory returns the victory proportion (not a raw count), so use it directly
victory_fraction, avg_reward = check_victory(model, env, trials=victory_trials)
after_check = datetime.datetime.now()
logger.info("Victory check took {:.2f} seconds".format((after_check - after_learning).total_seconds()))
victory_count = int(round(victory_fraction * victory_trials))
logger.info("Won {} of {} evaluations ({:.2f})".format(victory_count, victory_trials, victory_fraction))
max_victory_fraction = max(max_victory_fraction, victory_fraction)
if tb_writer is not None and tb_log_name is not None:
tb_writer.add_scalar('{}_avg_reward'.format(tb_log_name), avg_reward, steps)
tb_writer.add_scalar('{}_victory_count'.format(tb_log_name), victory_count, steps)
tb_writer.add_scalar('{}_victory_fraction'.format(tb_log_name), victory_fraction, steps)
current_time = datetime.datetime.now()
elapsed_seconds = (current_time - initial_time).total_seconds()
if victory_fraction >= victory_threshold:
logger.info("Achieved victory threshold after {} steps".format(steps))
break
logger.info("Achieved max victory fraction {:.2f} after {} seconds ({} steps)".format(max_victory_fraction, elapsed_seconds, steps))
achieved_goal = max_victory_fraction >= victory_threshold
return achieved_goal, steps, elapsed_seconds
def check_victory(model, env, trials):
if isinstance(env, SubprocVecEnv_Torch):
list_of_reward, n_steps, victories = evaluate_policy_torch(model, env, n_eval_episodes=trials, deterministic=False, render=False, callback=None, reward_threshold=None, return_episode_rewards=True)
else:
list_of_reward, n_steps, victories = evaluate_policy(model, env, n_eval_episodes=trials, deterministic=False, render=False, callback=None, reward_threshold=None, return_episode_rewards=True)
logger.info("===================================================")
modelplayer = env.get_attr('driving_agent')[0]
logger.info('In check_victory, driving_agent: %s' % modelplayer)
avg_reward = float(sum(list_of_reward))/len(list_of_reward)
victory_count = len([v for v in victories if v == True])
victory_prop = float(victory_count)/len(victories)
logger.info('victory_prop: %.2f (%s out of %s); avg_reward: %.3f' %
(victory_prop,
victory_count,
len(victories),
avg_reward
))
logger.info("===================================================")
return victory_prop, avg_reward,
def evaluate_policy_torch(model, env, n_eval_episodes, deterministic=True,
render=False,
callback=None,
reward_threshold=None,
return_episode_rewards=False):
"""
Modified from https://stable-baselines.readthedocs.io/en/master/_modules/stable_baselines/common/evaluation.html#evaluate_policy
to return additional info
"""
logger.debug("Evaluating policy")
episode_rewards, episode_lengths, victories = [], [], []
obs = env.reset()
episodes_reward = [0.0 for _ in range(env.num_envs)]
episodes_len = [0.0 for _ in range(env.num_envs)]
state = None
logger.debug("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$")
logger.debug("In evaluate_policy_torch, n_eval_episodes: %s" % n_eval_episodes)
logger.debug("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$")
while len(episode_rewards) < n_eval_episodes:
action, state = model.predict(obs, state=state, deterministic=deterministic)
obs, rewards, dones, _infos = env.step(action)
for i in range(len(rewards)):
episodes_reward[i] += rewards[i]
episodes_len[i] += 1
if render:
env.render()
if dones.any():
for i, d in enumerate(dones):
if d:
info = _infos[i]
victory = info['result'] == "WIN"
victories.append(victory)
episode_rewards.append(episodes_reward[i])
episode_lengths.append(episodes_len[i])
episodes_reward[i] = 0
episodes_len[i] = 0
mean_reward = np.mean(episode_rewards)
std_reward = np.std(episode_rewards)
if reward_threshold is not None:
assert mean_reward > reward_threshold, "Mean reward below threshold: {:.2f} < {:.2f}".format(mean_reward, reward_threshold)
if return_episode_rewards:
return episode_rewards, episode_lengths, victories
return mean_reward, std_reward, victories
def evaluate_policy(model, env, n_eval_episodes=4, deterministic=True, render=False, callback=None, reward_threshold=None, return_episode_rewards=False):
"""
Modified from https://stable-baselines.readthedocs.io/en/master/_modules/stable_baselines/common/evaluation.html#evaluate_policy
to return additional info
"""
logger.debug("Evaluating policy")
episode_rewards, episode_lengths, victories = [], [], []
for ep in range(n_eval_episodes):
logger.debug("Evaluating episode {} of {}".format(ep, n_eval_episodes))
obs = env.reset()
ep_done, state = False, None
episode_length = 0
episode_reward = 0.0
if isinstance(env, VecEnv) or isinstance(env, SubprocVecEnv_Torch):
episodes_reward = [0.0 for _ in range(env.num_envs)]
else:
episodes_reward = [0.0]
victory = False
while not ep_done:
action, state = model.predict(obs, state=state, deterministic=deterministic)
obs, rewards, dones, _infos = env.step(action)
if not isinstance(env, VecEnv):
rewards = [rewards]
dones = np.array([dones])
_infos = [_infos]
episode_length += 1
for i in range(len(rewards)):
episodes_reward[i] += rewards[i]
if callback is not None:
callback(locals(), globals())
if episode_length > 1000:
logger.warning("Episode over 1000 steps.")
if render:
env.render()
if any(dones):
first_done_index = dones.tolist().index(True)
info = _infos[first_done_index]
victory = info['result'] == "WIN"
episode_reward = episodes_reward[first_done_index]
ep_done = True
episode_rewards.append(episode_reward)
episode_lengths.append(episode_length)
victories.append(victory)
mean_reward = np.mean(episode_rewards)
std_reward = np.std(episode_rewards)
if reward_threshold is not None:
assert mean_reward > reward_threshold, "Mean reward below threshold: {:.2f} < {:.2f}".format(mean_reward, reward_threshold)
if return_episode_rewards:
return episode_rewards, episode_lengths, victories
return mean_reward, std_reward, victories
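# Hedged usage sketch (not part of the original module; paths and settings are
# illustrative): evaluate_policy expects a stable-baselines-style model and an env
# whose step() info dict carries a 'result' key.
#
#   env = get_envs('pelican', '/path/to/balanced.json',          # hypothetical config
#                  opponents=['/path/to/panther.zip'], num_envs=1)
#   model = PPO2.load('/path/to/pelican_model.zip')              # hypothetical checkpoint
#   rewards, lengths, victories = evaluate_policy(model, env, n_eval_episodes=10,
#                                                 deterministic=False,
#                                                 return_episode_rewards=True)
#   win_rate = sum(victories) / len(victories)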
def get_env(driving_agent,
config_file_path,
opponent=None,
image_based=False,
random_panther_start_position=True,
max_illegal_moves_per_turn = 3,
sparse=False,
normalise=False,
is_in_vec_env=False):
params = dict(driving_agent = driving_agent,
config_file_path = config_file_path,
image_based = image_based,
random_panther_start_position = random_panther_start_position,
max_illegal_moves_per_turn = max_illegal_moves_per_turn,
normalise=normalise,
is_in_vec_env=is_in_vec_env)
if opponent != None and driving_agent == 'pelican':
params.update(panther_agent_filepath = opponent)
elif opponent != None and driving_agent == 'panther':
params.update(pelican_agent_filepath = opponent)
if sparse:
return PlarkEnvSparse(**params)
else:
return PlarkEnv(**params)
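# Illustrative sketch only (the opponent checkpoint path is an assumption): get_env
# builds a single PlarkEnv or PlarkEnvSparse and, when an opponent checkpoint is
# supplied, wires it in as the non-driving agent.
#
#   env = get_env(driving_agent='pelican',
#                 config_file_path='/Components/plark-game/plark_game/game_config/10x10/balanced.json',
#                 opponent='/path/to/panther_checkpoint.zip',    # hypothetical opponent
#                 sparse=True,
#                 normalise=True)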
def get_envs(driving_agent,
config_file_path,
opponents=[],
num_envs=1,
image_based=False,
random_panther_start_position=True,
max_illegal_moves_per_turn=3,
sparse=False,
vecenv=True,
mixture=None,
normalise=False):
params = dict(driving_agent = driving_agent,
config_file_path = config_file_path,
image_based = image_based,
random_panther_start_position = random_panther_start_position,
max_illegal_moves_per_turn = max_illegal_moves_per_turn,
sparse = sparse,
normalise = normalise,
is_in_vec_env=vecenv)
if len(opponents) == 1:
params.update(opponent=opponents[0])
if vecenv == False:
return get_env(**params)
elif len(opponents) < 2:
return SubprocVecEnv_Torch([lambda:get_env(**params) for _ in range(num_envs)])
elif len(opponents) >= 2:
opponents = np.random.choice(opponents, size = num_envs, p = mixture)
params_list = []
for o in opponents:
params.update(opponent=o)
params_list.append(deepcopy(params))
        # bind params at definition time: a bare lambda would capture the loop variable
        # by reference and every subprocess env would get the last opponent's parameters
        return SubprocVecEnv_Torch([lambda params=params: get_env(**params) for params in params_list])
# Save model based on env
def save_model_with_env_settings(basepath,model,modeltype,env,basicdate=None):
if basicdate is None:
basicdate = str(datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
if isinstance(env, VecEnv) or isinstance(env, SubprocVecEnv_Torch):
modelplayer = env.get_attr('driving_agent')[0]
render_height = env.get_attr('render_height')[0]
render_width = env.get_attr('render_width')[0]
image_based = env.get_attr('image_based')[0]
else:
modelplayer = env.driving_agent
render_height = env.render_height
render_width = env.render_width
image_based = env.image_based
model_path,model_dir, modellabel = save_model(basepath,model,modeltype,modelplayer,render_height,render_width,image_based,basicdate)
return model_path,model_dir, modellabel
# Saves model and metadata
def save_model(basepath,model,modeltype,modelplayer,render_height,render_width,image_based,basicdate=None):
if basicdate is None:
basicdate = str(datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
modellabel = model_label(modeltype,basicdate,modelplayer)
model_dir = os.path.join(basepath, modellabel)
logger.info("Checking folder: " + model_dir)
os.makedirs(model_dir, exist_ok=True)
os.chmod(model_dir, 0o777)
logger.info("Saving Metadata")
if isinstance(model.env, VecEnv) or isinstance(model.env, SubprocVecEnv_Torch):
normalise = model.env.get_attr('normalise')[0]
domain_params_in_obs = model.env.get_attr('domain_params_in_obs')[0]
else:
normalise = model.env.normalise
domain_params_in_obs = model.env.domain_params_in_obs
save_model_metadata(model_dir,modeltype,modelplayer,basicdate,render_height,render_width,image_based, normalise, domain_params_in_obs)
logger.info("Saving Model")
model_path = os.path.join(model_dir, modellabel + ".zip")
model.save(model_path)
logger.info('model_dir: '+model_dir)
logger.info('model_path: '+model_path)
return model_path,model_dir, modellabel
## Used for generating the json header file which holds details regarding the model.
## This will be used when playing the game from the GUI.
def save_model_metadata(model_dir,modeltype,modelplayer,dateandtime,render_height,render_width,image_based, normalise, domain_params_in_obs):
jsondata = {}
jsondata['algorithm'] = modeltype
jsondata['date'] = str(dateandtime)
jsondata['agentplayer'] = modelplayer
jsondata['render_height'] = render_height
jsondata['render_width'] = render_width
jsondata['image_based'] = image_based
jsondata['normalise'] = normalise
jsondata['domain_params_in_obs'] = domain_params_in_obs
json_path = os.path.join(model_dir, 'metadata.json')
with open(json_path, 'w') as outfile:
json.dump(jsondata, outfile)
logger.info('json saved to: '+json_path)
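# For reference, the metadata.json written above has roughly this shape (the values
# shown are examples only, not taken from a real run):
#
#   {
#     "algorithm": "PPO2",
#     "date": "20200101_120000",
#     "agentplayer": "pelican",
#     "render_height": 250,
#     "render_width": 310,
#     "image_based": false,
#     "normalise": true,
#     "domain_params_in_obs": false
#   }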
## Custom Model Evaluation Method for evaluating Plark games.
## Does require changes to how data is passed back from environments.
## Instead of using "return ob, reward, done, {}" use "return ob, reward, done, {game.state}"
def custom_eval(model, env, n_eval_episodes=10, deterministic=True,
render=False, callback=None, reward_threshold=None,
return_episode_rewards=False, player_type="PELICAN"):
"""
Runs policy for `n_eval_episodes` episodes and returns average reward.
This is made to work only with one env.
:param model: (BaseRLModel) The RL agent you want to evaluate.
:param env: (gym.Env or VecEnv) The gym environment. In the case of a `VecEnv`
this must contain only one environment.
:param n_eval_episodes: (int) Number of episode to evaluate the agent
:param deterministic: (bool) Whether to use deterministic or stochastic actions
:param render: (bool) Whether to render the environment or not
:param callback: (callable) callback function to do additional checks,
called after each step.
:param reward_threshold: (float) Minimum expected reward per episode,
this will raise an error if the performance is not met
:param return_episode_rewards: (bool) If True, a list of reward per episode
will be returned instead of the mean.
:return: (float, float) Mean reward per episode, std of reward per episode
returns ([float], [int]) when `return_episode_rewards` is True
"""
if player_type == "PELICAN":
WINCONDITION = "PELICANWIN"
if player_type == "PANTHER":
WINCONDITION = "PANTHERWIN"
if isinstance(env, VecEnv):
assert env.num_envs == 1, "You must pass only one environment when using this function"
totalwin = 0
episode_rewards, episode_lengths = [], []
for _ in range(n_eval_episodes):
obs = env.reset()
done, state = False, None
episode_reward = 0.0
episode_length = 0
while not done:
action, state = model.predict(obs, state=state, deterministic=deterministic)
obs, reward, done, _info = env.step(action)
episode_reward += reward
if callback is not None:
callback(locals(), globals())
episode_length += 1
if render:
env.render()
if WINCONDITION in _info:
totalwin = totalwin + 1
episode_rewards.append(episode_reward)
episode_lengths.append(episode_length)
mean_reward = np.mean(episode_rewards)
std_reward = np.std(episode_rewards)
if reward_threshold is not None:
assert mean_reward > reward_threshold, 'Mean reward below threshold: '\
'{:.2f} < {:.2f}'.format(mean_reward, reward_threshold)
if return_episode_rewards:
return episode_rewards, episode_lengths, totalwin
return mean_reward, std_reward, totalwin
def loadAgent(filepath, algorithm_type):
try:
if algorithm_type.lower() == 'dqn':
model = DQN.load(filepath)
elif algorithm_type.lower() == 'ppo2':
model = PPO2.load(filepath)
elif algorithm_type.lower() == 'ppo':
model = PPO.load(filepath)
elif algorithm_type.lower() == 'a2c':
model = A2C.load(filepath)
elif algorithm_type.lower() == 'acktr':
model = ACKTR.load(filepath)
return model
except:
        raise ValueError('Error loading agent. File: "' + filepath + '" does not exist')
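# Hedged example of loadAgent (the checkpoint path is hypothetical):
#
#   model = loadAgent('/data/agents/models/PPO2_20200101_120000_pelican/'
#                     'PPO2_20200101_120000_pelican.zip', 'PPO2')
#   action, _ = model.predict(obs, deterministic=True)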
def og_load_driving_agent_make_video(pelican_agent_filepath, pelican_agent_name, panther_agent_filepath, panther_agent_name, config_file_path='/Components/plark-game/plark_game/game_config/10x10/balanced.json',video_path='/Components/plark_ai_flask/builtangularSite/dist/assets/videos'):
"""
    Method for loading an agent, making an environment, and making a video. Mainly used in notebooks.
"""
    logger.info("Load driving agent make video - pelican agent filepath = " + pelican_agent_filepath)
basicdate = str(datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
video_file = basicdate+'.mp4'
video_file_path = os.path.join(video_path, video_file)
os.makedirs(video_path, exist_ok=True)
files = os.listdir(pelican_agent_filepath)
if len(files) > 0:
for f in files:
if '.zip' in f:
# load model
metadata_filepath = os.path.join(pelican_agent_filepath, 'metadata.json')
agent_filepath = os.path.join(pelican_agent_filepath, f)
with open(metadata_filepath) as f:
metadata = json.load(f)
logger.info('Playing against:'+agent_filepath)
if metadata['agentplayer'] == 'pelican':
pelican_agent = classes.Pelican_Agent_Load_Agent(agent_filepath, metadata['algorithm'])
pelican_model = pelican_agent.model
env = plark_env.PlarkEnv(driving_agent='pelican',panther_agent_filepath=panther_agent_filepath, panther_agent_name=panther_agent_name, config_file_path=config_file_path)
basewidth,hsize = make_video(pelican_model,env,video_file_path)
logger.info("This is the environment variable " + str(env))
elif metadata['agentplayer'] == 'panther':
raise ValueError('No Pelican agent found in ', pelican_agent_filepath)
else:
raise ValueError('no agent found in ', files)
return video_file, env.status,video_file_path
def load_driving_agent_make_video(pelican_agent_filepath, pelican_agent_name, panther_agent_filepath, panther_agent_name, config_file_path='/Components/plark-game/plark_game/game_config/10x10/balanced.json',video_path='/Components/plark_ai_flask/builtangularSite/dist/assets/videos',basic_agents_filepath='/Components/plark-game/plark_game/agents/basic', renderWidth=None, renderHeight=None):
"""
    Method for loading an agent, making an environment, and making a video. Mainly used from flask server.
"""
basicdate = str(datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
video_file = basicdate+'.mp4'
video_file_path = os.path.join(video_path, video_file)
os.makedirs(video_path, exist_ok=True)
kwargs = {
'driving_agent': "pelican",
'panther_agent_filepath': panther_agent_filepath,
'panther_agent_name': panther_agent_name,
}
game_env = classes.Environment()
game_env.createNewGame(config_file_path, **kwargs)
game = game_env.activeGames[len(game_env.activeGames)-1]
agent = classes.load_agent(pelican_agent_filepath,pelican_agent_name,basic_agents_filepath,game,**kwargs)
if renderHeight is None:
renderHeight = game.pelican_parameters['render_height']
    if renderWidth is None:
        renderWidth = game.pelican_parameters['render_width']
basewidth, hsize = new_make_video(agent, game, video_file_path, renderWidth, renderHeight)
return video_file, game.gameState ,video_file_path
def make_video_VEC_ENV(model, env, video_file_path,n_steps = 10000,fps=DEFAULT_FPS,deterministic=False,basewidth=BASEWIDTH,verbose=False):
# Test the trained agent
# This is when you have a stable baselines model and an gym env
obs = env.reset()
writer = imageio.get_writer(video_file_path, fps=fps)
hsize = None
for step in range(n_steps):
#######################################################################
        # Get image and convert back to PIL.Image
try:
image = PIL.Image.fromarray(env.render(mode='rgb_array'))
        except Exception:
            logger.warning("Could not convert the rendered frame back to a PIL.Image")
#######################################################################
action, _ = model.predict(obs, deterministic=deterministic)
obs, reward, done, info = env.step(action)
if verbose:
logger.info("Step: "+str(step)+" Action: "+str(action)+' Reward:'+str(reward)+' Done:'+str(done))
if hsize is None:
wpercent = (basewidth/float(image.size[0]))
hsize = int((float(image.size[1])*float(wpercent)))
res_image = image.resize((basewidth,hsize), PIL.Image.ANTIALIAS)
writer.append_data(np.copy(np.array(res_image)))
writer.close()
return basewidth,hsize
def make_video(model,env,video_file_path,n_steps = 10000,fps=DEFAULT_FPS,deterministic=False,basewidth=BASEWIDTH,verbose =False):
# Test the trained agent
# This is when you have a stable baselines model and an gym env
obs = env.reset()
writer = imageio.get_writer(video_file_path, fps=fps)
hsize = None
for step in range(n_steps):
image = env.render(view='ALL')
action, _ = model.predict(obs, deterministic=deterministic)
obs, reward, done, info = env.step(action)
if verbose:
logger.info("Step: "+str(step)+" Action: "+str(action)+' Reward:'+str(reward)+' Done:'+str(done))
if hsize is None:
wpercent = (basewidth/float(image.size[0]))
hsize = int((float(image.size[1])*float(wpercent)))
res_image = image.resize((basewidth,hsize), PIL.Image.ANTIALIAS)
        writer.append_data(np.copy(np.array(res_image)))
    writer.close()
    return basewidth,hsize
from __future__ import print_function
import matplotlib
matplotlib.use('Agg')
import pylab as plt
import numpy as np
import sys
from astrometry.util.fits import *
from astrometry.util.plotutils import *
from astrometry.libkd.spherematch import match_radec
from tractor.sfd import SFDMap
from legacypipe.survey import *
def sample_in_radec_box(ralo, rahi, declo, dechi, N,
nbatch=1000):
'''
Draw N samples uniformly within the given RA,Dec box, correctly
handling the change of scale of RA with respect to Dec.
'''
rr,dd = [],[]
ntotal = 0
while ntotal < N:
# "unit" values ru in [0, 1) that will be scaled to RA
ru = np.random.uniform(size=nbatch)
# Draw Dec values
d = np.random.uniform(low=declo, high=dechi, size=nbatch)
# Taper the accepted width in RA based on Dec; reject ones outside
# NOTE that we could make this more efficient (reject fewer) by
# scaling by the min/max cos(Dec) values.
cosd = np.cos(np.deg2rad(d))
I = np.flatnonzero(ru < cosd)
if len(I) == 0:
continue
# Scale "ru" to RAs
r = ralo + (rahi - ralo) * ru[I]/cosd[I]
d = d[I]
rr.append(r)
dd.append(d)
ntotal += len(r)
#print('Kept', len(r), 'of', nbatch)
ra = np.hstack(rr)[:N]
dec = np.hstack(dd)[:N]
return ra,dec
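# Why the cos(Dec) rejection above works: a box of fixed RA width covers a solid angle
# proportional to cos(Dec), so accepting the uniform deviate ru only when ru < cos(Dec)
# (and then stretching the survivors by 1/cos(Dec)) yields points that are uniform per
# unit solid angle. A quick self-check (illustrative only):
#
#   ra, dec = sample_in_radec_box(10., 20., 60., 80., 100000)
#   # the mean Dec lands below the box midpoint of 70 deg, because high-Dec rows
#   # cover less solid angle:
#   print(np.mean(dec))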
def main():
ps = PlotSequence('shotgun')
survey = LegacySurveyData()
C = fits_table('survey-ccds-annotated.fits')
print(len(C), 'CCDs')
C.cut(C.photometric)
C.cut(C.blacklist_ok)
print(len(C), 'photometric and not blacklisted')
# HACK
print('FIXME not cutting on DECALS')
#C.cut(C.tilepass > 0)
#print(len(C), 'taken by DECaLS')
targets = dict(g=24.0, r=23.4, z=22.5)
def ivtomag(iv, nsigma=5.):
return -2.5 * (np.log10(nsigma / np.sqrt(iv)) - 9)
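    # ivtomag turns a 5-sigma detection inverse variance into an AB magnitude,
    # assuming fluxes are in nanomaggies (zeropoint 22.5):
    #   flux_5sig = nsigma / sqrt(iv)  and  m = 22.5 - 2.5*log10(flux_5sig),
    # which is the -2.5*(log10(nsigma/sqrt(iv)) - 9) written above.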
def band_index(band):
allbands = 'ugrizY'
return allbands.index(band)
ccmap = dict(g='g', r='r', z='m')
ceil_exptime = dict(g=125., r=125., z=250.)
#plt.clf()
bands = 'grz'
for band in bands:
tmag = targets[band]
print()
print(band, 'band, target depth', tmag)
ccds = C[C.filter == band]
ccdarea = (2046*4094*(0.262/3600.)**2)
print(len(ccds), 'CCDs, total exptime', np.sum(ccds.exptime),
'(mean %.1f)' % np.mean(ccds.exptime), 'total area',
len(ccds)*ccdarea, 'sq.deg')
detsig1 = ccds.sig1 / ccds.galnorm_mean
totiv = np.sum(1. / detsig1**2)
# depth we would have if we had all exposure time in one CCD
# print('5-sigma galaxy depth if concentrated in one CCD:', ivtomag(totiv))
# # mean depth
# print('5-sigma galaxy depth if spread equally among', len(ccds), 'CCDs:', ivtomag(totiv / len(ccds)))
# print('vs median depth', np.median(ccds.galdepth))
# print('5-sigma galaxy depth if spread equally among %i/2' % (len(ccds)), 'CCDs:', ivtomag(totiv / (len(ccds)/2)))
# print('5-sigma galaxy depth if spread equally among %i/3' % (len(ccds)), 'CCDs:', ivtomag(totiv / (len(ccds)/3)))
# spread over 6000 sq deg
sqdeg = 6000
avgiv = totiv * ccdarea / sqdeg
#print('5-sigma galaxy depth if spread over', sqdeg, 'sqdeg:', ivtomag(avgiv))
tflux = 10.**(tmag / -2.5 + 9)
tiv = 1. / (tflux / 5)**2
#print('Fraction of', sqdeg, 'sqdeg survey complete:', avgiv / tiv)
iband = band_index(band)
ext = ccds.decam_extinction[:,iband]
medext = np.median(ext)
print('With extinction (median %.2f mag):' % medext)
transmission = 10.**(-ext / 2.5)
detsig1 = ccds.sig1 / ccds.galnorm_mean / transmission
        totiv = np.sum(1. / detsig1**2)
import numpy as np
from ..adjacency import mask_adjacency_array, reindex_adjacency_array
from .base import TriMesh
class ColouredTriMesh(TriMesh):
r"""
Combines a :map:`TriMesh` with a colour per vertex.
Parameters
----------
points : ``(n_points, n_dims)`` `ndarray`
The array representing the points.
trilist : ``(M, 3)`` `ndarray` or ``None``, optional
The triangle list. If `None`, a Delaunay triangulation of
the points will be used instead.
colours : ``(N, 3)`` `ndarray`, optional
The floating point RGB colour per vertex. If not given, grey will be
assigned to each vertex.
copy: `bool`, optional
If ``False``, the points, trilist and colours will not be copied on
assignment.
In general this should only be used if you know what you are doing.
Raises
------
ValueError
If the number of colour values does not match the number of vertices.
"""
def __init__(self, points, trilist=None, colours=None, copy=True):
TriMesh.__init__(self, points, trilist=trilist, copy=copy)
# Handle the settings of colours, either be provided a default grey
# set of colours, or copy the given array if necessary
if colours is None:
# default to grey
            colours_handle = np.ones_like(points, dtype=np.float)
import numpy as np
import torch
import torch.nn as nn
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir) # add parent path
import torch.optim as optim
import matplotlib.pyplot as plt
from sim2real_policies.utils.choose_env import choose_env
from sim2real_policies.utils.policy_networks import DPG_PolicyNetwork, RandomPolicy
from mujoco_py import MujocoException
def query_params(env, normalize=True, randomised_only=False, dynamics_only=False):
"""
Query the dynamics parameters from the env,
normalize it if normalize is True,
only return randomised parameters if randomised_only is True,
only return dynamics parameters (no noise parameters) if dynamics_only is True.
Return: array of parameters
"""
params_dict = env.get_dynamics_parameters()
randomised_params_keys = env.get_randomised_parameters()
params_ranges = env.get_parameter_sampling_ranges() # range
params_factors = env.get_factors_for_randomisation() # multiplied factor
params_value_list = []
for param_key, param_value in params_dict.items():
if randomised_only and (param_key in randomised_params_keys) is False:
continue
if dynamics_only and ("time" in param_key or "noise" in param_key):
# print('Non-dynamics parameters: ', param_key)
continue
else:
if normalize:
if param_key in params_factors.keys():
param_factor = params_factors[param_key]
else:
raise NotImplementedError
if param_key in params_ranges.keys():
param_range = params_ranges[param_key]
else:
raise NotImplementedError
scale = param_range[1]-param_range[0]
param_factor = param_factor + 1e-15 # for factor=0.
param_value = np.clip((param_value/(param_factor) - param_range[0])/(scale), 0., 1.)
if isinstance(param_value, np.ndarray):
params_value_list = list(np.concatenate((params_value_list, param_value)))
else: # scalar
params_value_list.append(param_value)
return np.array(params_value_list)
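# Hedged usage sketch for query_params (the env id below is an assumption, not taken
# from this codebase): each randomised dynamics parameter is mapped to roughly [0, 1]
# via (value/factor - range_lo) / (range_hi - range_lo).
#
#   env, *_ = choose_env('SawyerReach')   # hypothetical env name
#   params = query_params(env, normalize=True, randomised_only=True, dynamics_only=True)
#   print(params.shape)                   # (number of randomised dynamics params,)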
def _flatten_obs(obs_dict, verbose=False): # gym observation wrapper
"""
Filters keys of interest out and concatenate the information.
Args:
obs_dict: ordered dictionary of observations
"""
keys = ["robot-state", "task-state", "target_pos",]
ob_lst = []
for key in obs_dict:
if key in keys: # hacked
if verbose:
print("adding key: {}".format(key))
ob_lst.append(obs_dict[key])
return np.concatenate(ob_lst)
def offline_history_collection(env_name, itr=30, policy=None, \
vectorize=True, discrete=False, vine=False, vine_sample_size=500, egreedy=0):
"""
Collect random simulation parameters and trajetories with given policy.
----------------------------------------------------------------
params:
env_name: name of env to collect data from
itr: data episodes
policy: policy used for collecting data
vectorize: vectorized parameters into a list rather than a dictionary, used for system identification
discrete: discrete randomisation range, as in EPI paper
vine: Vine data collection, same state and same action at the initial of trajectory, as in EPI paper
vine_sample_size: number of state action samples in vine trajectory set
egreedy: the factor for collecting data with epsilon-greedy policy
"""
env, environment_params, environment_wrappers, environment_wrapper_arguments = choose_env(env_name)
action_space = env.action_space
state_space = env.observation_space
if policy is None: # load off-line policy if no policy
policy=DPG_PolicyNetwork(state_space, action_space, 512).cuda()
# load from somewhere
history_sa=[]
history_s_=[]
params_list=[]
if vine:
vine_state_set = [] # underlying state of env, not the observation
vine_action_set = [] # initial action after initial state
vine_idx = 0
# collect state action sets according to EPI's vine implementation
while vine_idx<vine_sample_size:
state = env.reset()
while vine_idx<vine_sample_size:
if np.random.rand() < egreedy:
action = env.action_space.sample()
else:
action = policy.get_action(state)
vine_state_set.append(env.get_state())
vine_action_set.append(action)
vine_idx += 1
next_state, _, done, _ = env.step(action)
state = next_state
if done: break
print('Start collecting transitions.')
env.ignore_done = True
for epi in range(itr):
print('Episode: {}'.format(epi))
state = env.reset()
env.randomisation_off()
# reset_params = env.get_dynamics_parameters()
if discrete:
env.randomisation_on() # as sample_discretized_env_parameters() needs randomisation ranges
sampled_env_params_dict = sample_discretized_env_parameters(env)
env.randomisation_off()
env.set_dynamics_parameters(sampled_env_params_dict)
if vectorize:
env.randomisation_on()
params = query_params(env)
env.randomisation_off()
else:
params = env.get_dynamics_parameters()
params_list.append(params)
if vine:
epi_sa =[]
epi_s_ =[]
for underlying_state, action in zip(vine_state_set, vine_action_set):
env.set_state(underlying_state) # underlying state is different from obs of env
state = _flatten_obs(env._get_observation()) # hacked
try:
next_state, _, done, _ = env.step(action)
except MujocoException:
print('Data collection: MujocoException')
action = np.zeros_like(action)
next_state = state
epi_sa.append(np.concatenate((state, action)))
epi_s_.append(np.array(next_state))
if done: # keep using same env after done
env.reset()
history_sa.append(np.array(epi_sa))
history_s_.append(np.array(epi_s_))
else:
epi_traj = []
for step in range(env.horizon):
action = policy.get_action(state)
epi_traj.append(np.concatenate((state, action)))
try:
next_state, _, _, _ = env.step(action)
except MujocoException:
print('MujocoException')
                    action = np.zeros_like(action)
"""
Monitoring algorithms for Quicklook pipeline
"""
import numpy as np
import scipy.ndimage
import yaml
from lvmspec.quicklook.qas import MonitoringAlg, QASeverity
from lvmspec.quicklook import qlexceptions
from lvmspec.quicklook import qllogger
import os,sys
import datetime
from astropy.time import Time
from lvmspec.qa import qalib
from lvmspec.io import qa
qlog=qllogger.QLLogger("QuickLook",0)
log=qlog.getlog()
def qlf_post(qadict):
"""
A general function to HTTP post the QA output dictionary, intended for QLF
requires environmental variables: QLF_API_URL, QLF_USER, QLF_PASSWD
Args:
qadict: returned dictionary from a QA
"""
#- Check for environment variables and set them here
if "QLF_API_URL" in os.environ:
qlf_url=os.environ.get("QLF_API_URL")
if "QLF_USER" not in os.environ or "QLF_PASSWD" not in os.environ:
log.warning("Environment variables are not set for QLF. Set QLF_USER and QLF_PASSWD.")
else:
qlf_user=os.environ.get("QLF_USER")
qlf_passwd=os.environ.get("QLF_PASSWD")
log.debug("Environment variables are set for QLF. Now trying HTTP post.")
#- All set. Now try to HTTP post
try:
import requests
response=requests.get(qlf_url)
#- Check if the api has json
api=response.json()
#- proceed with post
job={"name":"QL","status":0,"dictionary":qadict} #- QLF should disintegrate dictionary
response=requests.post(api['job'],json=job,auth=(qlf_user,qlf_passwd))
except:
                log.error("Skipping HTTP post... Exception",exc_info=True)
else:
log.warning("Skipping QLF. QLF_API_URL must be set as environment variable")
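# Hedged usage note for qlf_post (endpoint and credentials below are placeholders,
# not real values): all three environment variables must be set before a QA
# dictionary can be posted.
#
#   os.environ["QLF_API_URL"] = "https://qlf.example.org/api"   # placeholder URL
#   os.environ["QLF_USER"] = "qlfuser"
#   os.environ["QLF_PASSWD"] = "********"
#   qlf_post(retval)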
class Get_RMS(MonitoringAlg):
def __init__(self,name,config,logger=None):
if name is None or name.strip() == "":
name="RMS"
from lvmspec.image import Image as im
kwargs=config['kwargs']
parms=kwargs['param']
key=kwargs['refKey'] if 'refKey' in kwargs else "NOISE_AMP"
status=kwargs['statKey'] if 'statKey' in kwargs else "NOISE_STAT"
kwargs["SAMI_RESULTKEY"]=key
kwargs["SAMI_QASTATUSKEY"]=status
if "ReferenceMetrics" in kwargs:
r=kwargs["ReferenceMetrics"]
if key in r:
kwargs["REFERENCE"]=r[key]
if "NOISE_WARN_RANGE" in parms and "NOISE_NORMAL_RANGE" in parms:
kwargs["RANGES"]=[(np.asarray(parms["NOISE_WARN_RANGE"]),QASeverity.WARNING),
(np.asarray(parms["NOISE_NORMAL_RANGE"]),QASeverity.NORMAL)]# sorted by most severe to least severe
MonitoringAlg.__init__(self,name,im,config,logger)
def run(self,*args,**kwargs):
if len(args) == 0 :
raise qlexceptions.ParameterException("Missing input parameter")
if not self.is_compatible(type(args[0])):
raise qlexceptions.ParameterException("Incompatible parameter type. Was expecting lvmspec.image.Image got {}".format(type(args[0])))
input_image=args[0]
if "paname" not in kwargs:
paname=None
else:
paname=kwargs["paname"]
if "ReferenceMetrics" in kwargs: refmetrics=kwargs["ReferenceMetrics"]
else: refmetrics=None
amps=False
if "amps" in kwargs:
amps=kwargs["amps"]
if "param" in kwargs: param=kwargs["param"]
else: param=None
if "qlf" in kwargs:
qlf=kwargs["qlf"]
else: qlf=False
if "qafile" in kwargs: qafile = kwargs["qafile"]
else: qafile = None
if "qafig" in kwargs: qafig=kwargs["qafig"]
else: qafig = None
return self.run_qa(input_image,paname=paname,amps=amps,qafile=qafile,qafig=qafig, param=param, qlf=qlf, refmetrics=refmetrics)
def run_qa(self,image,paname=None,amps=False,qafile=None, qafig=None,param=None,qlf=False, refmetrics=None):
retval={}
retval["EXPID"] = '{0:08d}'.format(image.meta["EXPID"])
retval["PANAME"] = paname
retval["QATIME"] = datetime.datetime.now().isoformat()
retval["CAMERA"] = image.meta["CAMERA"]
retval["PROGRAM"] = image.meta["PROGRAM"]
retval["FLAVOR"] = image.meta["FLAVOR"]
retval["NIGHT"] = image.meta["NIGHT"]
kwargs=self.config['kwargs']
# return rms values in rms/sqrt(exptime)
rmsccd=qalib.getrms(image.pix/np.sqrt(image.meta["EXPTIME"])) #- should we add dark current and/or readnoise to this as well?
if param is None:
log.debug("Param is None. Using default param instead")
param = {
"NOISE_NORMAL_RANGE":[-1.0, 1.0],
"NOISE_WARN_RANGE":[-2.0, 2.0]
}
retval["PARAMS"] = param
if "REFERENCE" in kwargs:
retval['PARAMS']['NOISE_AMP_REF']=kwargs["REFERENCE"]
expnum=[]
rms_row=[]
rms_amps=[]
rms_over_amps=[]
overscan_values=[]
        #- get amp/overscan boundary in pixels
from lvmspec.preproc import _parse_sec_keyword
for kk in ['1','2','3','4']:
thisampboundary=_parse_sec_keyword(image.meta["CCDSEC"+kk])
thisoverscanboundary=_parse_sec_keyword(image.meta["BIASSEC"+kk])
for i in range(image.pix[thisoverscanboundary].shape[0]):
rmsrow = qalib.getrms(image.pix[thisoverscanboundary][i]/np.sqrt(image.meta["EXPTIME"]))
rms_row.append(rmsrow)
rms_thisover_thisamp=qalib.getrms(image.pix[thisoverscanboundary]/np.sqrt(image.meta["EXPTIME"]))
rms_thisamp=qalib.getrms(image.pix[thisampboundary]/np.sqrt(image.meta["EXPTIME"]))
rms_amps.append(rms_thisamp)
rms_over_amps.append(rms_thisover_thisamp)
rmsover=np.max(rms_over_amps)
rmsdiff_err='NORMAL'
if amps:
rms_amps=[]
rms_over_amps=[]
overscan_values=[]
            #- get amp/overscan boundary in pixels
from lvmspec.preproc import _parse_sec_keyword
for kk in ['1','2','3','4']:
thisampboundary=_parse_sec_keyword(image.meta["CCDSEC"+kk])
thisoverscanboundary=_parse_sec_keyword(image.meta["BIASSEC"+kk])
rms_thisover_thisamp=qalib.getrms(image.pix[thisoverscanboundary]/np.sqrt(image.meta["EXPTIME"]))
thisoverscan_values=np.ravel(image.pix[thisoverscanboundary]/np.sqrt(image.meta["EXPTIME"]))
rms_thisamp=qalib.getrms(image.pix[thisampboundary]/np.sqrt(image.meta["EXPTIME"]))
rms_amps.append(rms_thisamp)
rms_over_amps.append(rms_thisover_thisamp)
overscan_values+=thisoverscan_values.tolist()
rmsover=np.std(overscan_values)
# retval["METRICS"]={"RMS":rmsccd,"NOISE":rmsover,"RMS_AMP":np.array(rms_amps),"NOISE_AMP":np.array(rms_over_amps),"RMS_ROW":rms_row,"EXPNUM_WARN":expnum}
retval["METRICS"]={"RMS":rmsccd,"NOISE":rmsover,"RMS_AMP":np.array(rms_amps),"NOISE_AMP":np.array(rms_over_amps),"RMS_ROW":rms_row,"NOISE_STAT":rmsdiff_err,"EXPNUM_WARN":expnum}
else:
# retval["METRICS"]={"RMS":rmsccd,"NOISE":rmsover,"RMS_ROW":rms_row,"EXPNUM_WARN":expnum}
retval["METRICS"]={"RMS":rmsccd,"NOISE":rmsover,"RMS_ROW":rms_row,"NOISE_STAT":rmsdiff_err,"EXPNUM_WARN":expnum}
if qlf:
qlf_post(retval)
if qafile is not None:
outfile = qa.write_qa_ql(qafile,retval)
log.debug("Output QA data is in {}".format(outfile))
if qafig is not None:
from lvmspec.qa.qa_plots_ql import plot_RMS
plot_RMS(retval,qafig)
log.debug("Output QA fig {}".format(qafig))
return retval
def get_default_config(self):
return {}
class Count_Pixels(MonitoringAlg):
def __init__(self,name,config,logger=None):
if name is None or name.strip() == "":
name="COUNTPIX"
from lvmspec.image import Image as im
kwargs=config['kwargs']
parms=kwargs['param']
key=kwargs['refKey'] if 'refKey' in kwargs else "NPIX_AMP"
status=kwargs['statKey'] if 'statKey' in kwargs else "NPIX_STAT"
kwargs["SAMI_RESULTKEY"]=key
kwargs["SAMI_QASTATUSKEY"]=status
if "ReferenceMetrics" in kwargs:
r=kwargs["ReferenceMetrics"]
if key in r:
kwargs["REFERENCE"]=r[key]
if "NPIX_WARN_RANGE" in parms and "NPIX_NORMAL_RANGE" in parms:
kwargs["RANGES"]=[(np.asarray(parms["NPIX_WARN_RANGE"]),QASeverity.WARNING),
(np.asarray(parms["NPIX_NORMAL_RANGE"]),QASeverity.NORMAL)]# sorted by most severe to least severe
MonitoringAlg.__init__(self,name,im,config,logger)
def run(self,*args,**kwargs):
if len(args) == 0 :
raise qlexceptions.ParameterException("Missing input parameter")
if not self.is_compatible(type(args[0])):
raise qlexceptions.ParameterException("Incompatible input. Was expecting {} got {}".format(type(self.__inpType__),type(args[0])))
input_image=args[0]
if "paname" not in kwargs:
paname=None
else:
paname=kwargs["paname"]
if "ReferenceMetrics" in kwargs: refmetrics=kwargs["ReferenceMetrics"]
else: refmetrics=None
amps=False
if "amps" in kwargs:
amps=kwargs["amps"]
if "param" in kwargs: param=kwargs["param"]
else: param=None
if "qlf" in kwargs:
qlf=kwargs["qlf"]
else: qlf=False
if "qafile" in kwargs: qafile = kwargs["qafile"]
else: qafile = None
if "qafig" in kwargs: qafig=kwargs["qafig"]
else: qafig = None
return self.run_qa(input_image,paname=paname,amps=amps,qafile=qafile,qafig=qafig, param=param, qlf=qlf, refmetrics=refmetrics)
def run_qa(self,image,paname=None,amps=False,qafile=None,qafig=None, param=None, qlf=False, refmetrics=None):
retval={}
retval["PANAME"] = paname
retval["QATIME"] = datetime.datetime.now().isoformat()
retval["EXPID"] = '{0:08d}'.format(image.meta["EXPID"])
retval["CAMERA"] = image.meta["CAMERA"]
retval["PROGRAM"] = image.meta["PROGRAM"]
retval["FLAVOR"] = image.meta["FLAVOR"]
retval["NIGHT"] = image.meta["NIGHT"]
kwargs=self.config['kwargs']
if param is None:
log.debug("Param is None. Using default param instead")
param = {
"CUTLO":3, # low threshold for number of counts in sigmas
"CUTHI":10,
"NPIX_NORMAL_RANGE":[200.0, 500.0],
"NPIX_WARN_RANGE":[50.0, 650.0]
}
retval["PARAMS"] = param
if "REFERENCE" in kwargs:
retval['PARAMS']['NPIX_AMP_REF']=kwargs["REFERENCE"]
#- get the counts over entire CCD in counts per second
npixlo=qalib.countpix(image.pix,nsig=param['CUTLO']) #- above 3 sigma in counts
npixhi=qalib.countpix(image.pix,nsig=param['CUTHI']) #- above 10 sigma in counts
npix_err='NORMAL'
#- get the counts for each amp
if amps:
npixlo_amps=[]
npixhi_amps=[]
#- get amp boundary in pixels
from lvmspec.preproc import _parse_sec_keyword
for kk in ['1','2','3','4']:
ampboundary=_parse_sec_keyword(image.meta["CCDSEC"+kk])
npixlo_thisamp=qalib.countpix(image.pix[ampboundary]/image.meta["EXPTIME"],nsig=param['CUTLO'])
npixlo_amps.append(npixlo_thisamp)
npixhi_thisamp=qalib.countpix(image.pix[ampboundary]/image.meta["EXPTIME"],nsig=param['CUTHI'])
npixhi_amps.append(npixhi_thisamp)
# retval["METRICS"]={"NPIX_LOW":npixlo,"NPIX_HIGH":npixhi,"NPIX_AMP": npixlo_amps,"NPIX_HIGH_AMP": npixhi_amps}
retval["METRICS"]={"NPIX_LOW":npixlo,"NPIX_HIGH":npixhi,"NPIX_AMP": npixlo_amps,"NPIX_HIGH_AMP": npixhi_amps,"NPIX_STAT":npix_err}
else:
# retval["METRICS"]={"NPIX_LOW":npixlo,"NPIX_HIGH":npixhi}
retval["METRICS"]={"NPIX_LOW":npixlo,"NPIX_HIGH":npixhi,"NPIX_STAT":npix_err}
if qlf:
qlf_post(retval)
if qafile is not None:
outfile = qa.write_qa_ql(qafile,retval)
log.debug("Output QA data is in {}".format(outfile))
if qafig is not None:
from lvmspec.qa.qa_plots_ql import plot_countpix
plot_countpix(retval,qafig)
log.debug("Output QA fig {}".format(qafig))
return retval
def get_default_config(self):
return {}
class Integrate_Spec(MonitoringAlg):
def __init__(self,name,config,logger=None):
if name is None or name.strip() == "":
name="INTEG"
from lvmspec.frame import Frame as fr
kwargs=config['kwargs']
parms=kwargs['param']
key=kwargs['refKey'] if 'refKey' in kwargs else "INTEG_AVG"
status=kwargs['statKey'] if 'statKey' in kwargs else "MAGDIFF_STAT"
kwargs["SAMI_RESULTKEY"]=key
kwargs["SAMI_QASTATUSKEY"]=status
if "ReferenceMetrics" in kwargs:
r=kwargs["ReferenceMetrics"]
if key in r:
kwargs["REFERENCE"]=r[key]
if "MAGDIFF_WARN_RANGE" in parms and "MAGDIFF_NORMAL_RANGE" in parms:
kwargs["RANGES"]=[(np.asarray(parms["MAGDIFF_WARN_RANGE"]),QASeverity.WARNING),
(np.asarray(parms["MAGDIFF_NORMAL_RANGE"]),QASeverity.NORMAL)]# sorted by most severe to least severe
MonitoringAlg.__init__(self,name,fr,config,logger)
def run(self,*args,**kwargs):
if len(args) == 0 :
raise qlexceptions.ParameterException("Missing input parameter")
if not self.is_compatible(type(args[0])):
raise qlexceptions.ParameterException("Incompatible input. Was expecting {}, got {}".format(type(self.__inpType__),type(args[0])))
fibermap=kwargs['FiberMap']
input_frame=args[0]
if "paname" not in kwargs:
paname=None
else:
paname=kwargs["paname"]
if "ReferenceMetrics" in kwargs: refmetrics=kwargs["ReferenceMetrics"]
else: refmetrics=None
amps=False
if "amps" in kwargs:
amps=kwargs["amps"]
if "param" in kwargs: param=kwargs["param"]
else: param=None
dict_countbins=None
if "dict_countbins" in kwargs:
dict_countbins=kwargs["dict_countbins"]
if "qlf" in kwargs:
qlf=kwargs["qlf"]
else: qlf=False
if "qafile" in kwargs: qafile = kwargs["qafile"]
else: qafile = None
if "qafig" in kwargs: qafig=kwargs["qafig"]
else: qafig = None
return self.run_qa(fibermap,input_frame,paname=paname,amps=amps, dict_countbins=dict_countbins, qafile=qafile,qafig=qafig, param=param, qlf=qlf, refmetrics=refmetrics)
def run_qa(self,fibermap,frame,paname=None,amps=False,dict_countbins=None, qafile=None,qafig=None, param=None, qlf=False, refmetrics=None):
retval={}
retval["PANAME" ] = paname
retval["QATIME"] = datetime.datetime.now().isoformat()
retval["EXPID"] = '{0:08d}'.format(frame.meta["EXPID"])
retval["CAMERA"] = frame.meta["CAMERA"]
retval["PROGRAM"] = frame.meta["PROGRAM"]
retval["FLAVOR"] = frame.meta["FLAVOR"]
retval["NIGHT"] = frame.meta["NIGHT"]
kwargs=self.config['kwargs']
ra = fibermap["RA_TARGET"]
dec = fibermap["DEC_TARGET"]
#- get the integrals for all fibers
flux=frame.flux
wave=frame.wave
integrals=np.zeros(flux.shape[0])
for ii in range(len(integrals)):
integrals[ii]=qalib.integrate_spec(wave,flux[ii])
#- average integrals over fibers of each object type and get imaging magnitudes
integ_avg_tgt=[]
mag_avg_tgt=[]
for T in ["ELG","QSO","LRG","STD"]:
fibers=np.where(frame.fibermap['OBJTYPE']==T)[0]
if len(fibers) < 1:
log.warning("no {} fibers found.".format(T))
magnitudes=frame.fibermap['MAG'][fibers]
mag_avg=np.mean(magnitudes)
mag_avg_tgt.append(mag_avg)
integ=integrals[fibers]
integ_avg=np.mean(integ)
integ_avg_tgt.append(integ_avg)
if T == "STD":
starfibers=fibers
int_stars=integ
int_average=integ_avg
# simple, temporary magdiff calculation (to be corrected...)
magdiff_avg=[]
for i in range(len(mag_avg_tgt)):
mag_fib=-2.5*np.log(integ_avg_tgt[i]/frame.meta["EXPTIME"])+30.
            if not np.isnan(mag_avg_tgt[i]):
                magdiff=mag_fib-mag_avg_tgt[i]
            else:
                magdiff=np.nan
magdiff_avg.append(magdiff)
if param is None:
log.debug("Param is None. Using default param instead")
param = {
"MAGDIFF_NORMAL_RANGE":[-0.5, 0.5],
"MAGDIFF_WARN_RANGE":[-1.0, 1.0]
}
retval["PARAMS"] = param
if "REFERENCE" in kwargs:
retval['PARAMS']['MAGDIFF_TGT_REF']=kwargs["REFERENCE"]
magdiff_avg_amp = [0.0]
magdiff_err='NORMAL'
#- get the counts for each amp
if amps:
#- get the fiducial boundary
leftmax = dict_countbins["LEFT_MAX_FIBER"]
rightmin = dict_countbins["RIGHT_MIN_FIBER"]
bottommax = dict_countbins["BOTTOM_MAX_WAVE_INDEX"]
topmin = dict_countbins["TOP_MIN_WAVE_INDEX"]
fidboundary = qalib.slice_fidboundary(frame,leftmax,rightmin,bottommax,topmin)
int_avg_amps=np.zeros(4)
for amp in range(4):
wave=frame.wave[fidboundary[amp][1]]
select_thisamp=starfibers[(starfibers >= fidboundary[amp][0].start) & (starfibers < fidboundary[amp][0].stop)]
stdflux_thisamp=frame.flux[select_thisamp,fidboundary[amp][1]]
if len(stdflux_thisamp)==0:
continue
else:
integ_thisamp=np.zeros(stdflux_thisamp.shape[0])
for ii in range(stdflux_thisamp.shape[0]):
integ_thisamp[ii]=qalib.integrate_spec(wave,stdflux_thisamp[ii])
int_avg_amps[amp]=np.mean(integ_thisamp)
# retval["METRICS"]={"RA":ra,"DEC":dec, "INTEG":int_stars, "INTEG_AVG":int_average,"INTEG_AVG_AMP":int_avg_amps, "STD_FIBERID": starfibers.tolist(),"MAGDIFF_TGT":magdiff_avg,"MAGDIFF_AVG_AMP":magdiff_avg_amp}
retval["METRICS"]={"RA":ra,"DEC":dec, "INTEG":int_stars, "INTEG_AVG":int_average,"INTEG_AVG_AMP":int_avg_amps, "STD_FIBERID": starfibers.tolist(),"MAGDIFF_TGT":magdiff_avg,"MAGDIFF_AVG_AMP":magdiff_avg_amp,"MAGDIFF_STAT":magdiff_err}
else:
# retval["METRICS"]={"RA":ra,"DEC":dec, "INTEG":int_stars,"INTEG_AVG":int_average,"STD_FIBERID":starfibers.tolist(),"MAGDIFF_TGT":magdiff_avg}
retval["METRICS"]={"RA":ra,"DEC":dec, "INTEG":int_stars,"INTEG_AVG":int_average,"STD_FIBERID":starfibers.tolist(),"MAGDIFF_TGT":magdiff_avg,"MAGDIFF_STAT":magdiff_err}
if qlf:
qlf_post(retval)
if qafile is not None:
outfile = qa.write_qa_ql(qafile,retval)
log.debug("Output QA data is in {}".format(outfile))
if qafig is not None:
from lvmspec.qa.qa_plots_ql import plot_integral
plot_integral(retval,qafig)
log.debug("Output QA fig {}".format(qafig))
return retval
def get_default_config(self):
return {}
class Sky_Continuum(MonitoringAlg):
def __init__(self,name,config,logger=None):
if name is None or name.strip() == "":
name="SKYCONT"
from lvmspec.frame import Frame as fr
kwargs=config['kwargs']
parms=kwargs['param']
key=kwargs['refKey'] if 'refKey' in kwargs else "SKYCONT"
status=kwargs['statKey'] if 'statKey' in kwargs else "SKYCONT_STAT"
kwargs["SAMI_RESULTKEY"]=key
kwargs["SAMI_QASTATUSKEY"]=status
if "ReferenceMetrics" in kwargs:
r=kwargs["ReferenceMetrics"]
if key in r:
kwargs["REFERENCE"]=r[key]
if "SKYCONT_WARN_RANGE" in parms and "SKYCONT_NORMAL_RANGE" in parms:
kwargs["RANGES"]=[(np.asarray(parms["SKYCONT_WARN_RANGE"]),QASeverity.WARNING),
(np.asarray(parms["SKYCONT_NORMAL_RANGE"]),QASeverity.NORMAL)]# sorted by most severe to least severe
MonitoringAlg.__init__(self,name,fr,config,logger)
def run(self,*args,**kwargs):
if len(args) == 0 :
raise qlexceptions.ParameterException("Missing input parameter")
if not self.is_compatible(type(args[0])):
raise qlexceptions.ParameterException("Incompatible input. Was expecting {}, got {}".format(type(self.__inpType__),type(args[0])))
fibermap=kwargs['FiberMap']
input_frame=args[0]
camera=input_frame.meta["CAMERA"]
wrange1=None
wrange2=None
if "wrange1" in kwargs:
wrange1=kwargs["wrange1"]
if "wrange2" in kwargs:
wrange2=kwargs["wrange2"]
if wrange1==None:
if camera[0]=="b": wrange1= "4000,4500"
if camera[0]=="r": wrange1= "5950,6200"
if camera[0]=="z": wrange1= "8120,8270"
if wrange2==None:
if camera[0]=="b": wrange2= "5250,5550"
if camera[0]=="r": wrange2= "6990,7230"
if camera[0]=="z": wrange2= "9110,9280"
paname=None
if "paname" in kwargs:
paname=kwargs["paname"]
if "ReferenceMetrics" in kwargs: refmetrics=kwargs["ReferenceMetrics"]
else: refmetrics=None
amps=False
if "amps" in kwargs:
amps=kwargs["amps"]
if "param" in kwargs: param=kwargs["param"]
else: param=None
dict_countbins=None
if "dict_countbins" in kwargs:
dict_countbins=kwargs["dict_countbins"]
if "qlf" in kwargs:
qlf=kwargs["qlf"]
else: qlf=False
if "qafile" in kwargs: qafile = kwargs["qafile"]
else: qafile = None
if "qafig" in kwargs: qafig=kwargs["qafig"]
else: qafig=None
return self.run_qa(fibermap,input_frame,wrange1=wrange1,wrange2=wrange2,paname=paname,amps=amps, dict_countbins=dict_countbins,qafile=qafile,qafig=qafig, param=param, qlf=qlf, refmetrics=refmetrics)
def run_qa(self,fibermap,frame,wrange1=None,wrange2=None,
paname=None,amps=False,dict_countbins=None,
qafile=None,qafig=None, param=None, qlf=False,
refmetrics=None):
#- qa dictionary
retval={}
retval["PANAME" ]= paname
retval["QATIME"] = datetime.datetime.now().isoformat()
retval["EXPID"] = '{0:08d}'.format(frame.meta["EXPID"])
retval["CAMERA"] = frame.meta["CAMERA"]
retval["PROGRAM"] = frame.meta["PROGRAM"]
retval["FLAVOR"] = frame.meta["FLAVOR"]
retval["NIGHT"] = frame.meta["NIGHT"]
kwargs=self.config['kwargs']
ra = fibermap["RA_TARGET"]
dec = fibermap["DEC_TARGET"]
if param is None:
log.debug("Param is None. Using default param instead")
from lvmspec.io import read_params
desi_params = read_params()
param = {}
for key in ['B_CONT','R_CONT', 'Z_CONT', 'SKYCONT_WARN_RANGE', 'SKYCONT_ALARM_RANGE']:
param[key] = desi_params['qa']['skysub']['PARAMS'][key]
retval["PARAMS"] = param
if "REFERENCE" in kwargs:
retval['PARAMS']['SKYCONT_REF']=kwargs["REFERENCE"]
skyfiber, contfiberlow, contfiberhigh, meancontfiber, skycont = qalib.sky_continuum(
frame, wrange1, wrange2)
skycont_err = 'NORMAL'
if amps:
leftmax = dict_countbins["LEFT_MAX_FIBER"]
rightmin = dict_countbins["RIGHT_MIN_FIBER"]
bottommax = dict_countbins["BOTTOM_MAX_WAVE_INDEX"]
topmin = dict_countbins["TOP_MIN_WAVE_INDEX"]
fidboundary = qalib.slice_fidboundary(frame,leftmax,rightmin,bottommax,topmin)
k1=np.where(skyfiber < fidboundary[0][0].stop)[0]
maxsky_index=max(k1)
contamp1=np.mean(contfiberlow[:maxsky_index])
contamp3=np.mean(contfiberhigh[:maxsky_index])
if fidboundary[1][0].start >=fidboundary[0][0].stop:
k2=np.where(skyfiber > fidboundary[1][0].start)[0]
minsky_index=min(k2)
contamp2=np.mean(contfiberlow[minsky_index:])
contamp4=np.mean(contfiberhigh[minsky_index:])
else:
contamp2=0
contamp4=0
skycont_amps=np.array((contamp1,contamp2,contamp3,contamp4)) #- in four amps regions
# retval["METRICS"]={"RA":ra,"DEC":dec, "SKYFIBERID": skyfiber.tolist(), "SKYCONT":skycont, "SKYCONT_FIBER":meancontfiber, "SKYCONT_AMP":skycont_amps}
retval["METRICS"]={"RA":ra,"DEC":dec, "SKYFIBERID": skyfiber.tolist(), "SKYCONT":skycont, "SKYCONT_FIBER":meancontfiber, "SKYCONT_AMP":skycont_amps, "SKYCONT_STAT":skycont_err}
else:
# retval["METRICS"]={"RA":ra,"DEC":dec, "SKYFIBERID": skyfiber.tolist(), "SKYCONT":skycont, "SKYCONT_FIBER":meancontfiber}
retval["METRICS"]={"RA":ra,"DEC":dec, "SKYFIBERID": skyfiber.tolist(), "SKYCONT":skycont, "SKYCONT_FIBER":meancontfiber, "SKYCONT_STAT":skycont_err}
if qlf:
qlf_post(retval)
if qafile is not None:
outfile = qa.write_qa_ql(qafile,retval)
log.debug("Output QA data is in {}".format(outfile))
if qafig is not None:
from lvmspec.qa.qa_plots_ql import plot_sky_continuum
plot_sky_continuum(retval,qafig)
log.debug("Output QA fig {}".format(qafig))
return retval
def get_default_config(self):
return {}
class Sky_Peaks(MonitoringAlg):
def __init__(self,name,config,logger=None):
if name is None or name.strip() == "":
name="SKYPEAK"
from lvmspec.frame import Frame as fr
kwargs=config['kwargs']
parms=kwargs['param']
key=kwargs['refKey'] if 'refKey' in kwargs else "PEAKCOUNT_MED_SKY"
status=kwargs['statKey'] if 'statKey' in kwargs else "PEAKCOUNT_STAT"
kwargs["SAMI_RESULTKEY"]=key
kwargs["SAMI_QASTATUSKEY"]=status
if "ReferenceMetrics" in kwargs:
r=kwargs["ReferenceMetrics"]
if key in r:
kwargs["REFERENCE"]=r[key]
if "PEAKCOUNT_WARN_RANGE" in parms and "PEAKCOUNT_NORMAL_RANGE" in parms:
kwargs["RANGES"]=[(np.asarray(parms["PEAKCOUNT_WARN_RANGE"]),QASeverity.WARNING),
(np.asarray(parms["PEAKCOUNT_NORMAL_RANGE"]),QASeverity.NORMAL)]# sorted by most severe to least severe
MonitoringAlg.__init__(self,name,fr,config,logger)
def run(self,*args,**kwargs):
if len(args) == 0 :
raise qlexceptions.ParameterException("Missing input parameter")
if not self.is_compatible(type(args[0])):
raise qlexceptions.ParameterException("Incompatible parameter type. Was expecting lvmspec.image.Image, got {}".format(type(args[0])))
fibermap=kwargs['FiberMap']
input_frame=args[0]
if "paname" not in kwargs:
paname=None
else:
paname=kwargs["paname"]
if "ReferenceMetrics" in kwargs: refmetrics=kwargs["ReferenceMetrics"]
else: refmetrics=None
amps=False
if "amps" in kwargs:
amps=kwargs["amps"]
if "param" in kwargs: param=kwargs["param"]
else: param=None
psf = None
if "PSFFile" in kwargs:
psf=kwargs["PSFFile"]
if "qlf" in kwargs:
qlf=kwargs["qlf"]
else: qlf=False
if "qafile" in kwargs: qafile = kwargs["qafile"]
else: qafile = None
if "qafig" in kwargs:
qafig=kwargs["qafig"]
else: qafig = None
return self.run_qa(fibermap,input_frame,paname=paname,amps=amps,psf=psf, qafile=qafile, qafig=qafig, param=param, qlf=qlf, refmetrics=refmetrics)
def run_qa(self,fibermap,frame,paname=None,amps=False,psf=None, qafile=None,qafig=None, param=None, qlf=False, refmetrics=None):
from lvmspec.qa.qalib import sky_peaks
retval={}
retval["PANAME"] = paname
retval["QATIME"] = datetime.datetime.now().isoformat()
retval["EXPID"] = '{0:08d}'.format(frame.meta["EXPID"])
retval["CAMERA"] = camera = frame.meta["CAMERA"]
retval["PROGRAM"] = frame.meta["PROGRAM"]
retval["FLAVOR"] = frame.meta["FLAVOR"]
retval["NIGHT"] = frame.meta["NIGHT"]
kwargs=self.config['kwargs']
ra = fibermap["RA_TARGET"]
dec = fibermap["DEC_TARGET"]
# Parameters
if param is None:
log.info("Param is None. Using default param instead")
from lvmspec.io import read_params
desi_params = read_params()
param = desi_params['qa']['skypeaks']['PARAMS']
# Run
nspec_counts, sky_counts = sky_peaks(param, frame, amps=amps)
rms_nspec = qalib.getrms(nspec_counts)
rms_skyspec = qalib.getrms(sky_counts)
sumcount_med_sky=[]
retval["PARAMS"] = param
if "REFERENCE" in kwargs:
retval['PARAMS']['PEAKCOUNT_REF']=kwargs["REFERENCE"]
# retval["METRICS"]={"RA":ra,"DEC":dec, "PEAKCOUNT":nspec_counts,"PEAKCOUNT_RMS":rms_nspec,"PEAKCOUNT_MED_SKY":sumcount_med_sky,"PEAKCOUNT_RMS_SKY":rms_skyspec}
sumcount_err='NORMAL'
retval["METRICS"]={"RA":ra,"DEC":dec, "PEAKCOUNT":nspec_counts,"PEAKCOUNT_RMS":rms_nspec,"PEAKCOUNT_MED_SKY":sumcount_med_sky,"PEAKCOUNT_RMS_SKY":rms_skyspec,"PEAKCOUNT_STAT":sumcount_err}
if qlf:
qlf_post(retval)
if qafile is not None:
outfile = qa.write_qa_ql(qafile,retval)
log.debug("Output QA data is in {}".format(outfile))
if qafig is not None:
from lvmspec.qa.qa_plots_ql import plot_sky_peaks
plot_sky_peaks(retval,qafig)
log.debug("Output QA fig {}".format(qafig))
return retval
def get_default_config(self):
return {}
class Calc_XWSigma(MonitoringAlg):
def __init__(self,name,config,logger=None):
if name is None or name.strip() == "":
name="XWSIGMA"
from lvmspec.image import Image as im
kwargs=config['kwargs']
parms=kwargs['param']
key=kwargs['refKey'] if 'refKey' in kwargs else "WSIGMA_MED_SKY"
status=kwargs['statKey'] if 'statKey' in kwargs else "XWSIGMA_STAT"
kwargs["SAMI_RESULTKEY"]=key
kwargs["SAMI_QASTATUSKEY"]=status
if "ReferenceMetrics" in kwargs:
r=kwargs["ReferenceMetrics"]
if key in r:
kwargs["REFERENCE"]=r[key]
if "XWSIGMA_WARN_RANGE" in parms and "XWSIGMA_NORMAL_RANGE" in parms:
kwargs["RANGES"]=[(np.asarray(parms["XWSIGMA_WARN_RANGE"]),QASeverity.WARNING),
(np.asarray(parms["XWSIGMA_NORMAL_RANGE"]),QASeverity.NORMAL)]# sorted by most severe to least severe
MonitoringAlg.__init__(self,name,im,config,logger)
def run(self,*args,**kwargs):
if len(args) == 0 :
raise qlexceptions.ParameterException("Missing input parameter")
if not self.is_compatible(type(args[0])):
raise qlexceptions.ParameterException("Incompatible parameter type. Was expecting lvmspec.image.Image got {}".format(type(args[0])))
fibermap=kwargs['FiberMap']
input_image=args[0]
if "paname" not in kwargs:
paname=None
else:
paname=kwargs["paname"]
if "ReferenceMetrics" in kwargs: refmetrics=kwargs["ReferenceMetrics"]
else: refmetrics=None
amps=False
if "amps" in kwargs:
amps=kwargs["amps"]
if "param" in kwargs: param=kwargs["param"]
else: param=None
psf = None
if "PSFFile" in kwargs:
psf=kwargs["PSFFile"]
fibermap = None
if "FiberMap" in kwargs:
fibermap=kwargs["FiberMap"]
if "qlf" in kwargs:
qlf=kwargs["qlf"]
else: qlf=False
if "qafile" in kwargs: qafile = kwargs["qafile"]
else: qafile = None
if "qafig" in kwargs: qafig=kwargs["qafig"]
else: qafig = None
return self.run_qa(fibermap,input_image,paname=paname,amps=amps,psf=psf, qafile=qafile,qafig=qafig, param=param, qlf=qlf, refmetrics=refmetrics)
def run_qa(self,fibermap,image,paname=None,amps=False,psf=None, qafile=None,qafig=None, param=None, qlf=False, refmetrics=None):
from scipy.optimize import curve_fit
retval={}
retval["PANAME"] = paname
retval["QATIME"] = datetime.datetime.now().isoformat()
retval["EXPID"] = '{0:08d}'.format(image.meta["EXPID"])
retval["CAMERA"] = camera = image.meta["CAMERA"]
retval["PROGRAM"] = image.meta["PROGRAM"]
retval["FLAVOR"] = image.meta["FLAVOR"]
retval["NIGHT"] = image.meta["NIGHT"]
kwargs=self.config['kwargs']
ra = fibermap["RA_TARGET"]
dec = fibermap["DEC_TARGET"]
if param is None:
log.debug("Param is None. Using default param instead")
if image.meta["FLAVOR"] == 'arc':
param = {
"B_PEAKS":[4047.7, 4359.6, 5087.2],
"R_PEAKS":[6144.8, 6508.3, 6600.8, 6718.9, 6931.4, 7034.4,],
"Z_PEAKS":[8379.9, 8497.7, 8656.8, 8783.0],
"XWSIGMA_NORMAL_RANGE":[-2.0, 2.0],
"XWSIGMA_WARN_RANGE":[-4.0, 4.0]
}
else:
param = {
"B_PEAKS":[3914.4, 5199.3, 5578.9],
"R_PEAKS":[6301.9, 6365.4, 7318.2, 7342.8, 7371.3],
"Z_PEAKS":[8401.5, 8432.4, 8467.5, 9479.4, 9505.6, 9521.8],
"XWSIGMA_NORMAL_RANGE":[-2.0, 2.0],
"XWSIGMA_WARN_RANGE":[-4.0, 4.0]
}
dw=2.
dp=3
b_peaks=param['B_PEAKS']
r_peaks=param['R_PEAKS']
z_peaks=param['Z_PEAKS']
if fibermap["OBJTYPE"][0] == 'ARC':
import lvmspec.psf
psf=lvmspec.psf.PSF(psf)
xsigma=[]
wsigma=[]
xsigma_sky=[]
wsigma_sky=[]
xsigma_amp1=[]
wsigma_amp1=[]
xsigma_amp2=[]
wsigma_amp2=[]
xsigma_amp3=[]
wsigma_amp3=[]
xsigma_amp4=[]
wsigma_amp4=[]
if fibermap['FIBER'].shape[0] >= 500:
fibers = 500
else:
fibers = fibermap['FIBER'].shape[0]
for i in range(fibers):
if camera[0]=="b":
peak_wave=np.array([b_peaks[0]-dw,b_peaks[0]+dw,b_peaks[1]-dw,b_peaks[1]+dw,b_peaks[2]-dw,b_peaks[2]+dw])
xpix=psf.x(ispec=i,wavelength=peak_wave)
ypix=psf.y(ispec=i,wavelength=peak_wave)
xpix_peak1=np.arange(int(round(xpix[0]))-dp,int(round(xpix[1]))+dp+1,1)
ypix_peak1=np.arange(int(round(ypix[0])),int(round(ypix[1])),1)
xpix_peak2=np.arange(int(round(xpix[2]))-dp,int(round(xpix[3]))+dp+1,1)
ypix_peak2=np.arange(int(round(ypix[2])),int(round(ypix[3])),1)
xpix_peak3=np.arange(int(round(xpix[4]))-dp,int(round(xpix[5]))+dp+1,1)
ypix_peak3=np.arange(int(round(ypix[4])),int(round(ypix[5])),1)
xpopt1,xpcov1=curve_fit(qalib.gauss,np.arange(len(xpix_peak1)),image.pix[int(np.mean(ypix_peak1)),xpix_peak1])
wpopt1,wpcov1=curve_fit(qalib.gauss,np.arange(len(ypix_peak1)),image.pix[ypix_peak1,int(np.mean(xpix_peak1))])
                xpopt2,xpcov2=curve_fit(qalib.gauss,np.arange(len(xpix_peak2)),image.pix[int(np.mean(ypix_peak2)),xpix_peak2])
import h5py
import numpy as np
from ..simulation_utilities import *
__all__ = ['SimInfo']
class SimInfo:
"""
Object that takes in a .hdf5 file (one of the outputs of the simulation) and provides tools for analysis.
    :param h5_name: hdf5 file
    :type h5_name: str
"""
def __init__(self, h5_name):
self.fname = h5_name
self._initialize()
self._load_sim_H5()
def _initialize(self):
wfn_name_temp = self.fname.split('/')
sim_name = wfn_name_temp[-1]
pth = '/'.join(wfn_name_temp[:-1])
sim_name = sim_name.split('sim_info')[0]
self._wfn_names = f"{pth}/wfns/{sim_name}wfn_"
def _load_sim_H5(self):
with h5py.File(self.fname, 'r') as f:
self.vref_vs_tau = f['vref_vs_tau'][:]
self.pop_vs_tau = f['pop_vs_tau'][:]
self.atom_nums = f['atomic_nums'][:]
self.atom_masses = f['atomic_masses'][:]
@staticmethod
def get_wfn(wfn_fl, ret_ang=False):
"""
Given a .hdf5 file, return wave function and descendant weights associated with that wave function.
:param wfn_fl: A resultant .hdf5 file from a PyVibDMC simulation
        :param ret_ang: boolean indicating returning the coordinates in angstroms. Bohr is the default.
:return: Coordinates array in angstroms (nxmx3), descendant weights array (n).
"""
with h5py.File(wfn_fl, 'r') as f:
cds = f['coords'][:]
if ret_ang:
# Fenris said it was dumb to convert, let the user decide what to do
cds = Constants.convert(cds, 'angstroms', to_AU=False)
dw = f['desc_wts'][:]
return cds, dw
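    # Hedged usage sketch (the file name is hypothetical):
    #
    #   sim = SimInfo('h2o_dmc_sim_info.hdf5')
    #   cds, dw = sim.get_wfns([1000, 2000], ret_ang=True)
    #   # cds: stacked walker coordinates from the requested time steps (angstroms),
    #   # dw: the matching descendant weights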
def get_wfns(self, time_step_list, ret_ang=False):
"""
Extract the wave function (walker set) and descendant weights given a time step number or numbers
        :param time_step_list: an int or list of ints giving the time steps whose wave functions you want from this simulation
        :type time_step_list: int or list
        :param ret_ang: boolean indicating returning the coordinates in angstroms. Bohr is the default.
:return:
"""
time_step_list = [time_step_list] if isinstance(time_step_list, int) else time_step_list
fl_list = [f'{self._wfn_names}{x}ts.hdf5' for x in time_step_list]
tot_cds = []
tot_dw = []
for fl in fl_list:
cds, dw = self.get_wfn(fl, ret_ang)
tot_cds.append(cds)
tot_dw.append(dw)
        tot_cds = np.concatenate(tot_cds)
        tot_dw = np.concatenate(tot_dw)
        return tot_cds, tot_dw
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 25 09:42:22 2016
@author: bing
"""
import numpy as np
#import scipy
import numba
import sys
import math
bohr_angstrom = 0.52917721092
hartree_wavenumber = 219474.63
#hartree_wavenumber = scipy.constants.value(u'hartree-inverse meter relationship') / 1e2
def M1mat(a, Nb):
    M1 = np.zeros((Nb,Nb))
# Test multi-species options in mt3d
import os
import numpy as np
import flopy
testpth = os.path.join(".", "temp", "t023")
# make the directory if it does not exist
if not os.path.isdir(testpth):
os.makedirs(testpth)
def test_mt3d_multispecies():
# modflow model
modelname = "multispecies"
nlay = 1
nrow = 20
ncol = 20
nper = 10
mf = flopy.modflow.Modflow(modelname=modelname, model_ws=testpth)
dis = flopy.modflow.ModflowDis(
mf, nlay=nlay, nrow=nrow, ncol=ncol, nper=nper
)
lpf = flopy.modflow.ModflowLpf(mf)
rch = flopy.modflow.ModflowRch(mf)
evt = flopy.modflow.ModflowEvt(mf)
mf.write_input()
# Create a 5-component mt3d model and write the files
ncomp = 5
mt = flopy.mt3d.Mt3dms(
modelname=modelname, modflowmodel=mf, model_ws=testpth, verbose=True
)
sconc3 = np.random.random((nrow, ncol))
btn = flopy.mt3d.Mt3dBtn(
mt, ncomp=ncomp, sconc=1.0, sconc2=2.0, sconc3=sconc3, sconc5=5.0
)
# check obs I/O
mt.btn.obs = np.array([[0, 2, 300], [0, 1, 250]])
crch32 = np.random.random((nrow, ncol))
    cevt33 = np.random.random((nrow, ncol))
import numpy as np
from pyhack.py_runko_aux import *
from pyhack.gauss_lobatto import CollGaussLobatto as lobatto
class coll:
def __init__(self,tile,dtf=1,M=3,K=3,collclass=lobatto,**kwargs):
self.collclass = collclass
coll = self.collclass(M,0,1)
self.K = K
self.M = M
self.nodes = coll._getNodes
self.weights = coll._getWeights(coll.tleft,coll.tright) #Get M nodes and weights
self.Qmat = coll._gen_Qmatrix #Generate q_(m,j), i.e. the large weights matrix
self.Smat = coll._gen_Smatrix #Generate s_(m,j), i.e. the large node-to-node weights matrix
self.delta_m = coll._gen_deltas #Generate vector of node spacings
self.Qmat *= dtf
self.Smat *= dtf
self.delta_m *= dtf
self.ssi = 1
## Parameters from runko tile/container
cont = tile.get_container(0)
pos = py_pos(cont)
self.nq = pos.shape[0]
self.q = cont.q
self.c = tile.cfl
self.predictor = False
if "predictor" in kwargs:
if kwargs["predictor"] == True:
self.predictor = True
nq = self.nq
#Collocation solution stuff
Ix = np.array([1,0])
Iv = np.array([0,1])
Ixv = np.array([[0,1],[0,0]])
Id = np.identity(nq*3)
I2d = np.identity(nq*3*2)
self.Ix = Ix
self.Iv = Iv
self.Ixv = Ixv
self.Id = Id
Qtil = self.Qmat[1:,1:]
I3M = np.identity(3*M)
self.Q = np.kron(np.identity(2),np.kron(Qtil,Id))
#Define required calculation matrices
QE = np.zeros((M+1,M+1),dtype=np.float)
QI = np.zeros((M+1,M+1),dtype=np.float)
QT = np.zeros((M+1,M+1),dtype=np.float)
SX = np.zeros((M+1,M+1),dtype=np.float)
for i in range(0,M):
QE[(i+1):,i] = self.delta_m[i]
QI[(i+1):,i+1] = self.delta_m[i]
QT = 1/2 * (QE + QI)
QX = QE @ QT + (QE*QE)/2
SX[:,:] = QX[:,:]
SX[1:,:] = QX[1:,:] - QX[0:-1,:]
self.SX = SX
self.SQ = self.Smat @ self.Qmat
d = 3*nq
self.x0 = np.zeros((M+1,nq,3),dtype=np.float)
self.x = np.zeros((M+1,nq,3),dtype=np.float)
self.xn = np.zeros((M+1,nq,3),dtype=np.float)
self.u0 = np.zeros((M+1,nq,3),dtype=np.float)
self.u = np.zeros((M+1,nq,3),dtype=np.float)
self.un = np.zeros((M+1,nq,3),dtype=np.float)
self.E = np.zeros((M+1,nq,3),dtype=np.float)
self.En = np.zeros((M+1,nq,3),dtype=np.float)
self.B = np.zeros((M+1,nq,3),dtype=np.float)
self.Bn = | np.zeros((M+1,nq,3),dtype=np.float) | numpy.zeros |
# Copyright 2019, the MIDOSS project contributors, The University of British Columbia,
# and Dalhousie University.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import h5py
import os
import yaml
def make_stats_file(path, GridX, GridY, output):
files =[]
for r, d, f in os.walk(path):
for file in f:
if '.hdf5' in file:
files.append(os.path.join(r, file))
stats_dict = {'variable':{'mean':2, 'min':1, 'max':5, 'std':6}}
for file in files:
with h5py.File(file, 'r') as f:
for group in list(f['Results'].keys()):
timeseries = np.array([])
for time in list(f['Results'][group].keys()):
if np.ndim(f['Results'][group][time][:]) == 3:
timeseries = np.append(timeseries, f['Results'][group][time][-1, GridX, GridY])
else:
timeseries = np.append(timeseries, f['Results'][group][time][GridX, GridY])
stats_dict[group] = {'min': "%.4g" % np.min(timeseries),
'max': "%.4g" % np.max(timeseries),
'mean': "%.4g" % np.mean(timeseries),
'std': "%.4g" % np.std(timeseries)}
if group == 'wind velocity X':
windx = timeseries
if group == 'wind velocity Y':
windy = timeseries
if group == 'velocity U':
currentsu = timeseries
if group == 'velocity V':
currentsv = timeseries
if group == 'Stokes U':
stokesu = timeseries
if group == 'Stokes V':
stokesv = timeseries
windspeed = np.mean(np.array([windx, windy]), axis=0)
stats_dict['wind speed'] = {'min': "%.4g" % np.min(windspeed),
'max': "%.4g" % np.max(windspeed),
'mean': "%.4g" % np.mean(windspeed),
'std': "%.4g" % np.std(windspeed)}
currentsspeed = np.mean(np.array([currentsu, currentsv]), axis=0)
stats_dict['currents speed'] = {'min': "%.4g" % np.min(currentsspeed),
'max': "%.4g" % np.max(currentsspeed),
'mean': "%.4g" % np.mean(currentsspeed),
'std': "%.4g" % | np.std(currentsspeed) | numpy.std |
import unittest
import numpy as np
from seedpod_ground_risk.path_analysis.harm_models.strike_model import StrikeModel, get_lethal_area
class StrikeModelTestCase(unittest.TestCase):
def setUp(self) -> None:
self.test_shape = (100, 100)
self.pix_area = 20 * 20
self.test_pdf = np.random.random(self.test_shape)
def test_lethal_area_ranges(self):
angles = np.deg2rad(np.linspace(1, 90))
la = get_lethal_area(angles, 1)
prop_la = la / self.pix_area
self.assertLessEqual(prop_la.max(), 1)
self.assertGreaterEqual(prop_la.min(), 0)
def test_max_range(self):
max_grid = np.full(self.test_shape, 1)
sm = StrikeModel(max_grid, self.pix_area, 2, np.deg2rad(30))
out = sm.transform(max_grid)
self.assertLessEqual(out.max(), 1)
self.assertGreaterEqual(out.min(), 0)
def test_min_range(self):
min_grid = np.full(self.test_shape, 0)
sm = StrikeModel(min_grid, self.pix_area, 2, np.deg2rad(30))
out = sm.transform(min_grid)
self.assertLessEqual(out.max(), 1)
self.assertGreaterEqual(out.min(), 0)
def test_array_inputs(self):
angles = np.random.random(self.test_shape) * 2
pop_density = | np.random.random(self.test_shape) | numpy.random.random |
"""Script to generate figures for Beltran & Kannan et. al.
Two figures were made by hand. Figure 1 is a pair of blender renderings. The
relevant blend file names are simply mentioned below.
Where data has to be pre-computed, the procedure is mentioned."""
import re
from pathlib import Path
import pickle
import matplotlib.cm as cm
import matplotlib.ticker as tck
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import numpy as np
import seaborn as sns
import pandas as pd
from matplotlib import pyplot as plt
import scipy
from scipy import stats
from scipy.optimize import curve_fit
#from sklearn.gaussian_process import GaussianProcessRegressor
#from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
from nuc_chain import geometry as ncg
from nuc_chain import linkers as ncl
from nuc_chain import rotations as ncr
from MultiPoint import propagator
from nuc_chain import fluctuations as wlc
from nuc_chain import visualization as vis
from nuc_chain.linkers import convert
# Plotting parameters
#width of one column on ppt slide in inch
col_width = 5.67
full_width = 8.63
aspect_ratio = 2/3
col_height = aspect_ratio*col_width
full_height = aspect_ratio*full_width
plot_params = {
'backend': 'pdf',
'savefig.format': 'pdf',
'text.usetex': True,
'font.size': 18,
'figure.figsize': [full_width, full_height],
'figure.facecolor': 'white',
'axes.grid': False,
'axes.edgecolor': 'black',
'axes.facecolor': 'white',
'axes.titlesize': 20,
'axes.labelsize': 20,
'legend.fontsize': 18,
'xtick.labelsize': 16,
'ytick.labelsize': 16,
'axes.linewidth': 1,
'xtick.top': False,
'xtick.bottom': True,
'xtick.direction': 'out',
'xtick.minor.size': 3,
'xtick.minor.width': 0.5,
'xtick.major.pad': 5,
'xtick.major.size': 5,
'xtick.major.width': 1,
'ytick.left': True,
'ytick.right': False,
'ytick.direction': 'out',
'ytick.minor.size': 3,
'ytick.minor.width': 0.5,
'ytick.major.pad': 5,
'ytick.major.size': 5,
'ytick.major.width': 1,
'lines.linewidth': 2
}
plt.rcParams.update(plot_params)
teal_flucts = '#387780'
red_geom = '#E83151'
dull_purple = '#755F80'
rich_purple = '#e830e8'
def render_chain(linkers, unwraps=0, **kwargs):
entry_rots, entry_pos = ncg.minimum_energy_no_sterics_linker_only(linkers, unwraps=unwraps)
# on linux, hit ctrl-d in the ipython terminal but don't accept the
# "exit" prompt to get the mayavi interactive mode to work. make sure
# to use "off-screen rendering" and fullscreen your window before
# saving (this is actually required if you're using a tiling window
# manager like e.g. i3 or xmonad).
vis.visualize_chain(entry_rots, entry_pos, linkers, unwraps=unwraps, plot_spheres=True, **kwargs)
def draw_triangle(alpha, x0, width, orientation, base=10,
**kwargs):
"""Draw a triangle showing the best-fit slope on a linear scale.
Parameters
----------
alpha : float
the slope being demonstrated
x0 : (2,) array_like
the "left tip" of the triangle, where the hypotenuse starts
width : float
horizontal size
orientation : string
'up' or 'down', control which way the triangle's right angle "points"
base : float
scale "width" for non-base 10
Returns
-------
corner : (2,) np.array
coordinates of the right-angled corner of the triangle
"""
x0, y0 = x0
x1 = x0 + width
y1 = y0 + alpha*(x1 - x0)
plt.plot([x0, x1], [y0, y1], 'k')
if (alpha >= 0 and orientation == 'up') \
or (alpha < 0 and orientation == 'down'):
plt.plot([x0, x1], [y1, y1], 'k')
plt.plot([x0, x0], [y0, y1], 'k')
# plt.plot lines have nice rounded caps
# plt.hlines(y1, x0, x1, **kwargs)
# plt.vlines(x0, y0, y1, **kwargs)
corner = [x0, y1]
elif (alpha >= 0 and orientation == 'down') \
or (alpha < 0 and orientation == 'up'):
plt.plot([x0, x1], [y0, y0], 'k')
plt.plot([x1, x1], [y0, y1], 'k')
# plt.hlines(y0, x0, x1, **kwargs)
# plt.vlines(x1, y0, y1, **kwargs)
corner = [x1, y0]
else:
raise ValueError(r"Need $\alpha \in \mathbb{R}$ and orientation in {'up', 'down'}")
return corner
def draw_power_law_triangle(alpha, x0, width, orientation, base=10,
**kwargs):
"""Draw a triangle showing the best-fit power-law on a log-log scale.
Parameters
----------
alpha : float
the power-law slope being demonstrated
x0 : (2,) array_like
the "left tip" of the power law triangle, where the hypotenuse starts
(in log units, to be consistent with draw_triangle)
width : float
horizontal size in number of major log ticks (default base-10)
orientation : string
'up' or 'down', control which way the triangle's right angle "points"
base : float
scale "width" for non-base 10
Returns
-------
corner : (2,) np.array
coordinates of the right-angled corner of the triangle
"""
x0, y0 = [base**x for x in x0]
x1 = x0*base**width
y1 = y0*(x1/x0)**alpha
plt.plot([x0, x1], [y0, y1], 'k')
if (alpha >= 0 and orientation == 'up') \
or (alpha < 0 and orientation == 'down'):
plt.plot([x0, x1], [y1, y1], 'k')
plt.plot([x0, x0], [y0, y1], 'k')
# plt.plot lines have nice rounded caps
# plt.hlines(y1, x0, x1, **kwargs)
# plt.vlines(x0, y0, y1, **kwargs)
corner = [x0, y1]
elif (alpha >= 0 and orientation == 'down') \
or (alpha < 0 and orientation == 'up'):
plt.plot([x0, x1], [y0, y0], 'k')
plt.plot([x1, x1], [y0, y1], 'k')
# plt.hlines(y0, x0, x1, **kwargs)
# plt.vlines(x1, y0, y1, **kwargs)
corner = [x1, y0]
else:
raise ValueError(r"Need $\alpha \in \mathbb{R}$ and orientation in {'up', 'down'}")
return corner
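# A minimal usage sketch (not part of the original script; the data below are
# made up purely for illustration) for the helper above: annotate a log-log
# plot with a triangle indicating an apparent power-law slope of 1/2.
def _power_law_triangle_demo():
    x = np.logspace(0, 3, 50)
    plt.loglog(x, 3*np.sqrt(x))
    # x0 is given in log10 units, width in decades
    corner = draw_power_law_triangle(alpha=1/2, x0=[1, 0.5], width=1,
                                     orientation='up')
    plt.text(corner[0], corner[1]*1.5, r'$L^{1/2}$')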
#link_ix, unwrap_ix, rise, angle, radius = ncg.tabulate_rise(dp_f=ncg.dp_omega_exit)
def plot_fig31_rise_vs_linker_length():
fig, ax = plt.subplots(figsize=(1.2*default_width, default_height))
links = np.arange(10, 101)
#kuhns1to250 = np.load('csvs/kuhns_1to250links_0to146unwraps.npy')
#calculate the 'phi' angle corresponding to twist due to linker
phis_dp_omega_exit = np.zeros(links.size)
for i, link in enumerate(links):
dP, Onext = ncg.dp_omega_exit(link, unwrap=0)
phi, theta, alpha = ncr.phi_theta_alpha_from_R(Onext)
#record angles in units of pi
phis_dp_omega_exit[i] = phi/np.pi + 1
plt.plot(links, rise[0:91,0], linewidth=0.5)
plt.scatter(links, rise[0:91,0], c=phis_dp_omega_exit, cmap='Spectral', s=3);
plt.xlabel('Linker length (bp)')
plt.ylabel(r'Rise (nm)')
plt.subplots_adjust(left=0.1, bottom=0.19, top=0.95, right=0.97)
cb = plt.colorbar(ticks=[0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2])
cb.set_label(r'$\phi$')
cb.ax.yaxis.set_major_formatter(tck.FormatStrFormatter('%g $\pi$'))
#cb.ax.yaxis.set_yticks([0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2],
# [r'$0$', r'$\frac{\pi}{4}$', r'$\frac{\pi}{2}$', r'$\frac{3\pi}{4}$', r'$\pi$',
# r'$\frac{5\pi}{4}$', r'$\frac{3\pi}{2}$', r'$\frac{7\pi}{4}$', r'$2\pi$'])
fig.text(0.13, 0.47, r'38 bp', size=10)
fig.text(0.12, 0.57, r'36 bp', size=10)
plt.savefig('plots/thesis/fig3.1_rise-vs-linker-length.pdf')
default_lis = [36]
default_colors = [teal_flucts]
def plot_r2_homo(lis=default_lis, colors=None):
"""The r2 of the 36bp homogenous chain (0 unwrapping) compared to the
wormlike chain with the corresponding Kuhn length."""
if colors is None:
if len(lis) == 2:
colors = default_colors
else:
colors = len(lis) * [teal_flucts]
assert(len(colors) == len(lis))
fig, ax = plt.subplots(figsize=(7.79, 4.43))
x = np.logspace(0, 7, 100)
#plot rigid rod line
plt.plot(x, x, '^', markersize=3, color=red_geom)
hdfs = {}
for i, li in enumerate(lis):
hdfs[li] = pd.read_csv(f'./csvs/r2/r2-fluctuations-mu_{li}-sigma_0_10_0unwraps.csv')
try:
del hdfs[li]['Unnamed: 0']
except:
pass
hdfs[li] = hdfs[li].set_index(['variance', 'chain_id']).loc[0.0, 0.0]
hdfs[li].iloc[0,0:2] = 1 # rmax,r2 == (0,0) ==> (1,1)
plt.plot(hdfs[li]['rmax'], np.sqrt(hdfs[li]['r2']), color=colors[i])
for li in lis:
y = np.sqrt(wlc.r2wlc(x, hdfs[li]['kuhn'].mean()/2))
plt.plot(x, y, '-.', color=[0,0,0], markersize=1)
xmin = 1
ymin = xmin
ymax = 700
xmax = 3_000
# bands representing different regimes of the R^2
plt.fill_between(x, ymin, ymax, where=x<12, color=[0.96, 0.95, 0.95])
plt.fill_between(x, ymin, ymax, where=((x>=12)&(x<250)), color=[0.99, 0.99, 0.99])
plt.fill_between(x, ymin, ymax, where=x>=250, color=[0.9, 0.9, 0.91])
# power law triangle for the two extremal regimes
corner = draw_power_law_triangle(1, [np.log10(2), np.log10(3)], 0.5, 'up')
plt.text(3, 11, '$L^1$')
corner = draw_power_law_triangle(1/2, [ | np.log10(350) | numpy.log10 |
import os
import pickle
import random
import time
import PIL
import numpy as np
import pandas as pd
import tensorflow as tf
from PIL import Image
from keras import Input, Model
from keras import backend as K
from keras.callbacks import TensorBoard
from keras.layers import Dense, LeakyReLU, BatchNormalization, ReLU, Reshape, UpSampling2D, Conv2D, Activation, \
concatenate, Flatten, Lambda, Concatenate
from keras.optimizers import Adam
from matplotlib import pyplot as plt
def load_class_ids(class_info_file_path):
"""
Load class ids from class_info.pickle file
"""
with open(class_info_file_path, 'rb') as f:
class_ids = pickle.load(f, encoding='latin1')
return class_ids
def load_embeddings(embeddings_file_path):
"""
Load embeddings
"""
with open(embeddings_file_path, 'rb') as f:
embeddings = pickle.load(f, encoding='latin1')
embeddings = np.array(embeddings)
print('embeddings: ', embeddings.shape)
return embeddings
def load_filenames(filenames_file_path):
"""
Load filenames.pickle file and return a list of all file names
"""
with open(filenames_file_path, 'rb') as f:
filenames = pickle.load(f, encoding='latin1')
return filenames
def load_bounding_boxes(dataset_dir):
"""
Load bounding boxes and return a dictionary of file names and corresponding bounding boxes
"""
# Paths
bounding_boxes_path = os.path.join(dataset_dir, 'bounding_boxes.txt')
file_paths_path = os.path.join(dataset_dir, 'images.txt')
# Read bounding_boxes.txt and images.txt file
df_bounding_boxes = pd.read_csv(bounding_boxes_path,
delim_whitespace=True, header=None).astype(int)
df_file_names = pd.read_csv(file_paths_path, delim_whitespace=True, header=None)
# Create a list of file names
file_names = df_file_names[1].tolist()
# Create a dictionary of file_names and bounding boxes
filename_boundingbox_dict = {img_file[:-4]: [] for img_file in file_names[:2]}
# Assign a bounding box to the corresponding image
for i in range(0, len(file_names)):
# Get the bounding box
bounding_box = df_bounding_boxes.iloc[i][1:].tolist()
key = file_names[i][:-4]
filename_boundingbox_dict[key] = bounding_box
return filename_boundingbox_dict
def get_img(img_path, bbox, image_size):
"""
Load and resize image
"""
img = Image.open(img_path).convert('RGB')
width, height = img.size
if bbox is not None:
R = int(np.maximum(bbox[2], bbox[3]) * 0.75)
center_x = int((2 * bbox[0] + bbox[2]) / 2)
center_y = int((2 * bbox[1] + bbox[3]) / 2)
y1 = np.maximum(0, center_y - R)
y2 = np.minimum(height, center_y + R)
x1 = np.maximum(0, center_x - R)
x2 = np.minimum(width, center_x + R)
img = img.crop([x1, y1, x2, y2])
img = img.resize(image_size, PIL.Image.BILINEAR)
return img
def load_dataset(filenames_file_path, class_info_file_path, cub_dataset_dir, embeddings_file_path, image_size):
"""
Load dataset
"""
filenames = load_filenames(filenames_file_path)
class_ids = load_class_ids(class_info_file_path)
bounding_boxes = load_bounding_boxes(cub_dataset_dir)
all_embeddings = load_embeddings(embeddings_file_path)
X, y, embeddings = [], [], []
print("Embeddings shape:", all_embeddings.shape)
for index, filename in enumerate(filenames):
bounding_box = bounding_boxes[filename]
try:
# Load images
img_name = '{}/images/{}.jpg'.format(cub_dataset_dir, filename)
img = get_img(img_name, bounding_box, image_size)
all_embeddings1 = all_embeddings[index, :, :]
embedding_ix = random.randint(0, all_embeddings1.shape[0] - 1)
embedding = all_embeddings1[embedding_ix, :]
X.append(np.array(img))
y.append(class_ids[index])
embeddings.append(embedding)
except Exception as e:
print(e)
X = np.array(X)
y = np.array(y)
embeddings = np.array(embeddings)
return X, y, embeddings
def generate_c(x):
mean = x[:, :128]
log_sigma = x[:, 128:]
stddev = K.exp(log_sigma)
epsilon = K.random_normal(shape=K.constant((mean.shape[1],), dtype='int32'))
c = stddev * epsilon + mean
return c
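# Rough numpy restatement (illustrative only, not used by the model code) of
# the reparameterization trick that generate_c performs on Keras tensors:
# split the 256-d conditioning vector into a mean half and a log-sigma half,
# then sample c = mean + sigma * epsilon.
def _generate_c_numpy(mean_logsigma):
    mean = mean_logsigma[:, :128]
    log_sigma = mean_logsigma[:, 128:]
    epsilon = np.random.normal(size=mean.shape)
    return mean + np.exp(log_sigma) * epsilon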
def build_ca_model():
"""
Get conditioning augmentation model.
Takes an embedding of shape (1024,) and returns a tensor of shape (256,)
"""
input_layer = Input(shape=(1024,))
x = Dense(256)(input_layer)
x = LeakyReLU(alpha=0.2)(x)
model = Model(inputs=[input_layer], outputs=[x])
return model
def build_embedding_compressor_model():
"""
Build embedding compressor model
"""
input_layer = Input(shape=(1024,))
x = Dense(128)(input_layer)
x = ReLU()(x)
model = Model(inputs=[input_layer], outputs=[x])
return model
def build_stage1_generator():
"""
Builds a generator model used in Stage-I
"""
input_layer = Input(shape=(1024,))
x = Dense(256)(input_layer)
mean_logsigma = LeakyReLU(alpha=0.2)(x)
c = Lambda(generate_c)(mean_logsigma)
input_layer2 = Input(shape=(100,))
gen_input = Concatenate(axis=1)([c, input_layer2])
x = Dense(128 * 8 * 4 * 4, use_bias=False)(gen_input)
x = ReLU()(x)
x = Reshape((4, 4, 128 * 8), input_shape=(128 * 8 * 4 * 4,))(x)
x = UpSampling2D(size=(2, 2))(x)
x = Conv2D(512, kernel_size=3, padding="same", strides=1, use_bias=False)(x)
x = BatchNormalization()(x)
x = ReLU()(x)
x = UpSampling2D(size=(2, 2))(x)
x = Conv2D(256, kernel_size=3, padding="same", strides=1, use_bias=False)(x)
x = BatchNormalization()(x)
x = ReLU()(x)
x = UpSampling2D(size=(2, 2))(x)
x = Conv2D(128, kernel_size=3, padding="same", strides=1, use_bias=False)(x)
x = BatchNormalization()(x)
x = ReLU()(x)
x = UpSampling2D(size=(2, 2))(x)
x = Conv2D(64, kernel_size=3, padding="same", strides=1, use_bias=False)(x)
x = BatchNormalization()(x)
x = ReLU()(x)
x = Conv2D(3, kernel_size=3, padding="same", strides=1, use_bias=False)(x)
x = Activation(activation='tanh')(x)
stage1_gen = Model(inputs=[input_layer, input_layer2], outputs=[x, mean_logsigma])
return stage1_gen
def build_stage1_discriminator():
"""
Create a model which takes two inputs
1. One from above network
2. One from the embedding layer
3. Concatenate along the axis dimension and feed it to the last module which produces final logits
"""
input_layer = Input(shape=(64, 64, 3))
x = Conv2D(64, (4, 4),
padding='same', strides=2,
input_shape=(64, 64, 3), use_bias=False)(input_layer)
x = LeakyReLU(alpha=0.2)(x)
x = Conv2D(128, (4, 4), padding='same', strides=2, use_bias=False)(x)
x = BatchNormalization()(x)
x = LeakyReLU(alpha=0.2)(x)
x = Conv2D(256, (4, 4), padding='same', strides=2, use_bias=False)(x)
x = BatchNormalization()(x)
x = LeakyReLU(alpha=0.2)(x)
x = Conv2D(512, (4, 4), padding='same', strides=2, use_bias=False)(x)
x = BatchNormalization()(x)
x = LeakyReLU(alpha=0.2)(x)
input_layer2 = Input(shape=(4, 4, 128))
merged_input = concatenate([x, input_layer2])
x2 = Conv2D(64 * 8, kernel_size=1,
padding="same", strides=1)(merged_input)
x2 = BatchNormalization()(x2)
x2 = LeakyReLU(alpha=0.2)(x2)
x2 = Flatten()(x2)
x2 = Dense(1)(x2)
x2 = Activation('sigmoid')(x2)
stage1_dis = Model(inputs=[input_layer, input_layer2], outputs=[x2])
return stage1_dis
def build_adversarial_model(gen_model, dis_model):
input_layer = Input(shape=(1024,))
input_layer2 = Input(shape=(100,))
input_layer3 = Input(shape=(4, 4, 128))
x, mean_logsigma = gen_model([input_layer, input_layer2])
dis_model.trainable = False
valid = dis_model([x, input_layer3])
model = Model(inputs=[input_layer, input_layer2, input_layer3], outputs=[valid, mean_logsigma])
return model
def KL_loss(y_true, y_pred):
mean = y_pred[:, :128]
logsigma = y_pred[:, 128:]
loss = -logsigma + .5 * (-1 + K.exp(2. * logsigma) + K.square(mean))
loss = K.mean(loss)
return loss
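# Scalar numpy sanity check (illustrative only, not used in training): the
# closed-form KL(N(mu, sigma^2) || N(0, 1)) that KL_loss evaluates
# element-wise, with log_sigma = log(sigma).
def _kl_vs_standard_normal(mu, log_sigma):
    return -log_sigma + 0.5 * (-1.0 + np.exp(2.0 * log_sigma) + mu ** 2)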
def custom_generator_loss(y_true, y_pred):
# Calculate binary cross entropy loss
return K.binary_crossentropy(y_true, y_pred)
def save_rgb_img(img, path):
"""
Save an rgb image
"""
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.imshow(img)
ax.axis("off")
ax.set_title("Image")
plt.savefig(path)
plt.close()
def write_log(callback, name, loss, batch_no):
"""
Write training summary to TensorBoard
"""
summary = tf.Summary()
summary_value = summary.value.add()
summary_value.simple_value = loss
summary_value.tag = name
callback.writer.add_summary(summary, batch_no)
callback.writer.flush()
if __name__ == '__main__':
data_dir = "data/birds/"
train_dir = data_dir + "/train"
test_dir = data_dir + "/test"
image_size = 64
batch_size = 64
z_dim = 100
stage1_generator_lr = 0.0002
stage1_discriminator_lr = 0.0002
stage1_lr_decay_step = 600
epochs = 1000
condition_dim = 128
embeddings_file_path_train = train_dir + "/char-CNN-RNN-embeddings.pickle"
embeddings_file_path_test = test_dir + "/char-CNN-RNN-embeddings.pickle"
filenames_file_path_train = train_dir + "/filenames.pickle"
filenames_file_path_test = test_dir + "/filenames.pickle"
class_info_file_path_train = train_dir + "/class_info.pickle"
class_info_file_path_test = test_dir + "/class_info.pickle"
cub_dataset_dir = data_dir + "/CUB_200_2011"
# Define optimizers
dis_optimizer = Adam(lr=stage1_discriminator_lr, beta_1=0.5, beta_2=0.999)
gen_optimizer = Adam(lr=stage1_generator_lr, beta_1=0.5, beta_2=0.999)
"""
Load datasets
"""
X_train, y_train, embeddings_train = load_dataset(filenames_file_path=filenames_file_path_train,
class_info_file_path=class_info_file_path_train,
cub_dataset_dir=cub_dataset_dir,
embeddings_file_path=embeddings_file_path_train,
image_size=(64, 64))
X_test, y_test, embeddings_test = load_dataset(filenames_file_path=filenames_file_path_test,
class_info_file_path=class_info_file_path_test,
cub_dataset_dir=cub_dataset_dir,
embeddings_file_path=embeddings_file_path_test,
image_size=(64, 64))
"""
Build and compile networks
"""
ca_model = build_ca_model()
ca_model.compile(loss="binary_crossentropy", optimizer="adam")
stage1_dis = build_stage1_discriminator()
stage1_dis.compile(loss='binary_crossentropy', optimizer=dis_optimizer)
stage1_gen = build_stage1_generator()
stage1_gen.compile(loss="mse", optimizer=gen_optimizer)
embedding_compressor_model = build_embedding_compressor_model()
embedding_compressor_model.compile(loss="binary_crossentropy", optimizer="adam")
adversarial_model = build_adversarial_model(gen_model=stage1_gen, dis_model=stage1_dis)
adversarial_model.compile(loss=['binary_crossentropy', KL_loss], loss_weights=[1, 2.0],
optimizer=gen_optimizer, metrics=None)
tensorboard = TensorBoard(log_dir="logs/{}".format(time.time()))
tensorboard.set_model(stage1_gen)
tensorboard.set_model(stage1_dis)
tensorboard.set_model(ca_model)
tensorboard.set_model(embedding_compressor_model)
# Generate an array containing real and fake values
# Apply label smoothing as well
real_labels = np.ones((batch_size, 1), dtype=float) * 0.9
fake_labels = np.zeros((batch_size, 1), dtype=float) * 0.1
for epoch in range(epochs):
print("========================================")
print("Epoch is:", epoch)
print("Number of batches", int(X_train.shape[0] / batch_size))
gen_losses = []
dis_losses = []
# Load data and train model
number_of_batches = int(X_train.shape[0] / batch_size)
for index in range(number_of_batches):
print("Batch:{}".format(index+1))
"""
Train the discriminator network
"""
# Sample a batch of data
z_noise = np.random.normal(0, 1, size=(batch_size, z_dim))
image_batch = X_train[index * batch_size:(index + 1) * batch_size]
embedding_batch = embeddings_train[index * batch_size:(index + 1) * batch_size]
image_batch = (image_batch - 127.5) / 127.5
# Generate fake images
fake_images, _ = stage1_gen.predict([embedding_batch, z_noise], verbose=3)
# Generate compressed embeddings
compressed_embedding = embedding_compressor_model.predict_on_batch(embedding_batch)
compressed_embedding = np.reshape(compressed_embedding, (-1, 1, 1, condition_dim))
compressed_embedding = np.tile(compressed_embedding, (1, 4, 4, 1))
dis_loss_real = stage1_dis.train_on_batch([image_batch, compressed_embedding],
np.reshape(real_labels, (batch_size, 1)))
dis_loss_fake = stage1_dis.train_on_batch([fake_images, compressed_embedding],
np.reshape(fake_labels, (batch_size, 1)))
dis_loss_wrong = stage1_dis.train_on_batch([image_batch[:(batch_size - 1)], compressed_embedding[1:]],
np.reshape(fake_labels[1:], (batch_size-1, 1)))
d_loss = 0.5 * np.add(dis_loss_real, 0.5 * np.add(dis_loss_wrong, dis_loss_fake))
print("d_loss_real:{}".format(dis_loss_real))
print("d_loss_fake:{}".format(dis_loss_fake))
print("d_loss_wrong:{}".format(dis_loss_wrong))
print("d_loss:{}".format(d_loss))
"""
Train the generator network
"""
g_loss = adversarial_model.train_on_batch([embedding_batch, z_noise, compressed_embedding],[K.ones((batch_size, 1)) * 0.9, K.ones((batch_size, 256)) * 0.9])
print("g_loss:{}".format(g_loss))
dis_losses.append(d_loss)
gen_losses.append(g_loss)
"""
Save losses to Tensorboard after each epoch
"""
write_log(tensorboard, 'discriminator_loss', | np.mean(dis_losses) | numpy.mean |
import numpy as np
import matplotlib.pyplot as plt
def plot_reliability_diagram(score, labels, linspace, scores_set, legend_set,
alpha=1, scatter_prop=0.0, fig=None, n_bins=10,
bins_count=True, title=None, **kwargs):
'''
Parameters
==========
scores_set : list of array_like of floats
List of scores given by different methods, the first one is always the
original one
labels : array_like of ints
Labels corresponding to the scores
legend_set : list of strings
Description of each array in the scores_set
alpha : float
Laplace regularization when computing the elements in the bins
scatter_prop : float
If the first score set is the original one, specifies the proportion of
(score, label) points to show as a scatter overlay
fig : matplotlib.pyplot.figure
Plots the axis in the given figure
bins_count : bool
If True, show the number of samples in each bin
Returns
=======
fig : matplotlib.pyplot.figure
Figure with the reliability diagram
'''
if fig is None:
fig = plt.figure()
ax = fig.add_subplot(111)
if title is not None:
ax.set_title(title)
n_lines = len(legend_set)
# Draw the empirical values in a histogram style
# TODO careful that now the min and max depend on the scores
s_min = min(score)
s_max = max(score)
bins = np.linspace(s_min, s_max, n_bins+1)
hist_tot = | np.histogram(score, bins=bins) | numpy.histogram |
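# Aside (illustrative only, not part of the function above): the smoothed
# per-bin empirical frequencies that a reliability diagram compares against
# the mean score in each bin, using the Laplace regularization alpha
# described in the docstring.
def _bin_frequencies(score, labels, n_bins=10, alpha=1):
    bins = np.linspace(min(score), max(score), n_bins + 1)
    idx = np.clip(np.digitize(score, bins) - 1, 0, n_bins - 1)
    hist_tot = np.bincount(idx, minlength=n_bins)
    hist_pos = np.bincount(idx, weights=labels, minlength=n_bins)
    return (hist_pos + alpha) / (hist_tot + 2 * alpha)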
import abc
import copy
import csv
import os
import typing
import warnings
from numbers import Number
from collections import defaultdict
from collections.abc import Sequence
import numpy as np
import scipy.interpolate
import scipy.signal
from slippy.core import _MaterialABC, _SurfaceABC
from .ACF_class import ACF
from .roughness_funcs import get_height_of_mat_vr, low_pass_filter
from .roughness_funcs import get_mat_vr, get_summit_curvatures
from .roughness_funcs import roughness, subtract_polynomial, find_summits
__all__ = ['Surface', 'assurface', 'read_surface', '_Surface', '_AnalyticalSurface', 'RollingSurface']
def assurface(profile, grid_spacing=None):
""" make a surface from a profile
Parameters
----------
profile : array-like
The surface profile
grid_spacing : float optional (None)
The spacing between grid points on the surface
Returns
-------
surface : Surface object
A surface object with the specified profile and grid size
See Also
--------
Surface
read_surface
Notes
-----
Examples
--------
>>> profile=np.random.normal(size=[10,10])
>>> my_surface=assurface(profile, 0.1)
>>> my_surface.extent
[1,1]
"""
return Surface(profile=profile, grid_spacing=grid_spacing)
def read_surface(file_name, **kwargs):
""" Read a surface from a file
Parameters
----------
file_name : str
The full path to the data file
Other Parameters
----------------
delim : str optional (',')
The delimiter used in the data file, only needed for csv or txt files
p_name : str optional ('profile')
The name of the variable containing the profile data, needed if a .mat
file is given
gs_name : str optional ('grid_spacing')
The name of the variable containing the grid_spacing, needed if a .mat
file is given
Returns
-------
A surface object generated from the file
See Also
--------
Surface
alicona_read
scipy.io.loadmat
Notes
-----
This function directly invokes the surface class, any other keywords that
can be passed to that class can be passed to this function
Examples
--------
>>> # Read a csv file with tab delimiters
>>> my_surface=read_surface('data.csv', delim='\t')
>>> # Read a .al3d file
>>> my_surface=read_surface('data.al3d')
>>> # Read a .mat file with variables called prof and gs
>>> my_surface=read_surface('data.mat', p_name='prof', gs_name='gs')
"""
return Surface(file_name=file_name, **kwargs)
class _Surface(_SurfaceABC):
"""
An abstract base class for surface types; this class should be extended to give new types of surface. To create an
analytical surface please subclass _AnalyticalSurface
"""
# The surface class for discrete surfaces (typically experimental)
is_discrete: bool = False
""" A bool flag, True if there is a profile present """
acf: typing.Optional[ACF] = None
""" The auto correlation function of the surface profile """
psd: typing.Optional[np.ndarray] = None
""" The power spectral density of the surface """
fft: typing.Optional[np.ndarray] = None
""" The fast fourier transform of the surface """
surface_type: str = "Generic"
""" A description of the surface type """
dimensions: typing.Optional[int] = 2
""" The number of spatial dimensions that """
is_analytic: bool = False
_material: typing.Optional[_MaterialABC] = None
unworn_profile: typing.Optional[np.ndarray] = None
_profile: typing.Optional[np.ndarray] = None
_grid_spacing: typing.Optional[float] = None
_shape: typing.Optional[tuple] = None
_extent: typing.Optional[tuple] = None
_inter_func = None
_allowed_keys = {}
_mask: typing.Optional[np.ndarray] = None
_size: typing.Optional[int] = None
_subclass_registry = []
_original_extent = None
wear_volumes: typing.Optional[defaultdict] = None
def __init__(self, grid_spacing: typing.Optional[float] = None, extent: typing.Optional[tuple] = None,
shape: typing.Optional[tuple] = None, is_discrete: bool = False):
if grid_spacing is not None and extent is not None and shape is not None:
raise ValueError("Up to two of grid_spacing, extent and size should be set, all three were set")
self.is_discrete = is_discrete
if grid_spacing is not None:
self.grid_spacing = grid_spacing
if extent is not None:
self.extent = extent
if shape is not None:
self.shape = shape
@classmethod
def __init_subclass__(cls, is_abstract=False, **kwargs):
super().__init_subclass__(**kwargs)
if not is_abstract:
_Surface._subclass_registry.append(cls)
@property
def size(self):
"""The total number of points in the surface"""
return self._size
@property
def mask(self):
"""A mask used to exclude some values from analysis, a single float or an array of bool the same size as profile
Either a boolean array of size self.size or a float of the value to be excluded
"""
return self._mask
@mask.setter
def mask(self, value: typing.Union[float, np.ndarray]):
if type(value) is float:
if np.isnan(value):
mask = np.isnan(self.profile)
else:
mask = self.profile == value
elif isinstance(value, np.ndarray):
mask = np.asarray(value, dtype=bool)
if not mask.shape == self.shape:
msg = ("profile and mask shapes do not match: profile is "
"{}, mask is {}".format(self.profile.shape, mask.shape))
raise TypeError(msg)
elif isinstance(value, str):
raise TypeError('Mask cannot be a string')
elif isinstance(value, Sequence):
mask = np.zeros_like(self.profile, dtype=bool)
for item in value:
self.mask = item
mask = np.logical_and(self._mask, mask)
else:
raise TypeError("Mask type is not recognised")
self._mask = mask
@mask.deleter
def mask(self):
self._mask = None
@property
def extent(self):
""" The overall dimensions of the surface in the same units as grid spacing
"""
return self._extent
@extent.setter
def extent(self, value: typing.Sequence[float]):
if not isinstance(value, Sequence):
msg = "Extent must be a Sequence, got {}".format(type(value))
raise TypeError(msg)
if len(value) > 2:
raise ValueError("Too many elements in extent, must be a maximum of two dimensions")
if self.profile is not None:
p_aspect = (self.shape[0]) / (self.shape[1])
e_aspect = value[0] / value[1]
if abs(e_aspect - p_aspect) > 0.0001:
msg = "Extent aspect ratio doesn't match profile aspect ratio"
raise ValueError(msg)
else:
self._extent = tuple(value)
self._grid_spacing = value[0] / (self.shape[0])
else:
self._extent = tuple(value)
self.dimensions = len(value)
if self.grid_spacing is not None:
self._shape = tuple([int(v / self.grid_spacing) for v in value])
self._size = np.product(self._shape)
if self._shape is not None:
self._grid_spacing = self._extent[0] / self._shape[0]
self._extent = tuple([sz * self._grid_spacing for sz in self._shape])
return
@extent.deleter
def extent(self):
self._extent = None
self._grid_spacing = None
if self.profile is None:
self._shape = None
self._size = None
@property
def shape(self):
"""The shape of the surface profile array, the number of points in each direction
"""
return self._shape
@shape.setter
def shape(self, value: typing.Sequence[int]):
if not isinstance(value, Sequence):
raise ValueError(f"Shape should be a Sequence type, got: {type(value)}")
if self._profile is not None:
raise ValueError("Cannot set shape when profile is present")
self._shape = tuple([int(x) for x in value])
self._size = np.product(self._shape)
if self.grid_spacing is not None:
self._extent = tuple([v * self.grid_spacing for v in value])
elif self.extent is not None:
self._grid_spacing = self._extent[0] / self._shape[0]
self._extent = tuple([sz * self.grid_spacing for sz in self.shape])
@shape.deleter
def shape(self):
if self.profile is None:
self._shape = None
self._size = None
self._extent = None
self._grid_spacing = None
else:
msg = "Cannot delete shape with a surface profile set"
raise ValueError(msg)
@property
def profile(self):
"""The height data for the surface profile
"""
return self._profile
@profile.setter
def profile(self, value: np.ndarray):
"""Sets the profile property
"""
if value is None:
return
try:
self.unworn_profile = np.asarray(value, dtype=float).copy()
# this has to be before _profile is set (rewritten for rolling surface)
self.wear_volumes = defaultdict(lambda: np.zeros_like(self.unworn_profile))
self._profile = np.asarray(value, dtype=float).copy()
except ValueError:
msg = "Could not convert profile to array of floats, profile contains invalid values"
raise ValueError(msg)
self._shape = self._profile.shape
self._size = self._profile.size
self.dimensions = len(self._profile.shape)
if self.grid_spacing is not None:
self._extent = tuple([self.grid_spacing * p for p in self.shape])
elif self.extent is not None:
if self.dimensions == 1:
self._grid_spacing = (self.extent[0] / self.shape[0])
if self.dimensions == 2:
e_aspect = self.extent[0] / self.extent[1]
p_aspect = self.shape[0] / self.shape[1]
if abs(e_aspect - p_aspect) < 0.0001:
self._grid_spacing = (self.extent[0] / self.shape[0])
else:
warnings.warn("Global size does not match profile size,"
" global size has been deleted")
self._extent = None
@profile.deleter
def profile(self):
self.unworn_profile = None
self._profile = None
del self.shape
del self.extent
del self.mask
self.wear_volumes = None
self.is_discrete = False
@property
def grid_spacing(self):
"""The distance between grid points in the x and y directions
"""
return self._grid_spacing
@grid_spacing.setter
def grid_spacing(self, grid_spacing: float):
if grid_spacing is None:
return
if not isinstance(grid_spacing, float):
try:
# noinspection PyTypeChecker
grid_spacing = float(grid_spacing)
except ValueError:
msg = ("Invalid type, grid spacing of type {} could not be "
"converted into float".format(type(grid_spacing)))
raise ValueError(msg)
if np.isinf(grid_spacing):
msg = "Grid spacing must be finite"
raise ValueError(msg)
self._grid_spacing = grid_spacing
if self.profile is None:
if self.extent is not None:
self._shape = tuple([int(sz / grid_spacing) for sz in self.extent])
self._size = np.product(self._shape)
self._extent = tuple([sz * grid_spacing for sz in self._shape])
elif self.shape is not None:
self._extent = tuple([grid_spacing * pt for pt in self.shape])
else:
self._extent = tuple([s * grid_spacing for s in self.shape])
@grid_spacing.deleter
def grid_spacing(self):
self._extent = None
self._grid_spacing = None
if self.profile is None:
del self.shape
@property
def material(self):
""" A material object describing the properties of the surface """
return self._material
@material.setter
def material(self, value):
if isinstance(value, _MaterialABC):
self._material = value
else:
raise ValueError("Unable to set material, expected material object"
" received %s" % str(type(value)))
@material.deleter
def material(self):
self._material = None
def wear(self, name: str, x_pts: np.ndarray, y_pts: np.ndarray, depth: np.ndarray):
"""
Add wear / geometry changes to the surface profile
Parameters
----------
name: str
Name of the source of wear
x_pts: np.ndarray
The x locations of the worn points in length units
y_pts: np.ndarray
The y locations of the worn points in length units
depth: np.ndarray
The depth to wear each point, negative values will add height
"""
if not x_pts.size == y_pts.size == depth.size:
raise ValueError(f"X, Y locations and wear depths are not the same size for wear '{name}':\n"
f"x:{x_pts.size}\n"
f"y:{y_pts.size}\n"
f"depth:{depth.size}")
if np.any(np.isnan(depth)):
raise ValueError(f"Some wear depth values are nan for wear {name}")
# equivalent to rounding and applying wear to nearest node
x_ind = np.array(x_pts / self.grid_spacing + self.grid_spacing/2, dtype=np.uint16)
y_ind = np.array(y_pts / self.grid_spacing + self.grid_spacing/2, dtype=np.uint16)
self.wear_volumes[name][y_ind, x_ind] += depth
self._profile[y_ind, x_ind] -= depth
self._inter_func = None # force remaking the interpolator if the surface has been worn
def get_fft(self, profile_in=None):
""" Find the fourier transform of the surface
Finds the fft of the surface and stores it in your_instance.fft
Parameters
----------
profile_in : array-like optional (None)
If set the fft of profile_in will be found and returned otherwise
instances profile attribute is used
Returns
-------
transform : array
The fft of the instance's profile or the profile_in if one is
supplied
See Also
--------
get_psd
get_acf
show
Notes
-----
Uses numpy fft.fft or fft.fft2 depending on the shape of the profile
Examples
--------
>>> # Set the fft property of the surface
>>> import slippy.surface as s
>>> my_surface = s.assurface([[1,2],[3,4]], grid_spacing=1)
>>> my_surface.get_fft()
>>> # Return the fft of a provided profile
>>> fft_of_profile_2 = my_surface.get_fft(np.array([[1,2],[3,4]]))
"""
if profile_in is None:
profile = self.profile
else:
profile = profile_in
try:
if len(profile.shape) == 1:
transform = np.fft.fft(profile)
if type(profile_in) is bool:
self.fft = transform
else:
transform = np.fft.fft2(profile)
if type(profile_in) is bool:
self.fft = transform
except AttributeError:
raise AttributeError('Surface must have a defined profile for fft'
' to be used')
if profile_in is None:
self.fft = transform
else:
return transform
def get_acf(self, profile_in=None):
""" Find the auto correlation function of the surface
Finds the ACF of the surface and stores it in your_instance.acf
Parameters
----------
profile_in : array-like optional (None)
Returns
-------
output : ACF object
An acf object with the acf data stored, the values can be extracted
by numpy.array(output)
See Also
--------
get_psd
get_fft
show
slippy.surface.ACF
Notes
-----
ACF data is kept in ACF objects, these can then be interpolated or
evaluated at specific points with a call:
Examples
--------
>>> import slippy.surface as s
>>> my_surface = s.assurface([[1,2],[3,4]], grid_spacing=1)
>>> # Sets the acf property of the surface with an ACF object
>>> my_surface.get_acf()
>>> # The acf values are then given by the following
>>> np.array(my_surface.acf)
>>> # The acf can be shown using the show function:
>>> my_surface.show('acf', 'image')
>>> # Finding the ACF of a provided profile:
>>> ACF_object_for_profile_2=my_surface.get_acf(np.array([[4, 3], [2, 1]]))
>>> # equivalent to ACF(profile_2)
"""
if profile_in is None:
# noinspection PyTypeChecker
self.acf = ACF(self)
else:
profile = np.asarray(profile_in)
# noinspection PyTypeChecker
output = np.array(ACF(profile))
return output
def get_psd(self):
""" Find the power spectral density of the surface
Finds the PSD of the surface and stores it in your_instance.psd
Parameters
----------
(None)
Returns
-------
(None), sets the psd attribute of the instance
See Also
--------
get_fft
get_acf
show
Notes
-----
Finds the psd by fourier transforming the ACF, in doing so looks for
the instance's acf property. If this is not found the acf is calculated
and set.
Examples
--------
>>> # sets the psd attribute of my_surface
>>> import slippy.surface as s
>>> my_surface = s.assurface([[1,2],[3,4]], grid_spacing=1)
>>> my_surface.get_psd()
"""
# PSD is the fft of the ACF (https://en.wikipedia.org/wiki/Spectral_density#Power_spectral_density)
if self.acf is None:
self.get_acf()
# noinspection PyTypeChecker
self.psd = self.get_fft(np.asarray(self.acf))
def subtract_polynomial(self, order, mask=None):
""" Flatten the surface by subtracting a polynomial
Alias for :func:`~slippy.surface.subtract_polynomial` function
"""
if mask is None:
mask = self.mask
new_profile, coefs = subtract_polynomial(self.profile, order, mask)
self.profile = new_profile
return coefs
def roughness(self, parameter_name, mask=None, curved_surface=False,
no_flattening=False, filter_cut_off=None,
four_nearest=False):
""" Find areal roughness parameters
Alias for :func:`~slippy.surface.roughness` function
"""
if mask is None:
mask = self.mask
out = roughness(self, parameter_name, mask=mask,
curved_surface=curved_surface,
no_flattening=no_flattening,
filter_cut_off=filter_cut_off,
four_nearest=four_nearest)
return out
def get_mat_vr(self, height, void=False, mask=None, ratio=True):
""" Find the material or void volume ratio for a given height
Alias for :func:`~slippy.surface.get_mat_vr` function
"""
if mask is None:
mask = self.mask
return get_mat_vr(height, profile=self.profile, void=void, mask=mask,
ratio=ratio)
def get_height_of_mat_vr(self, ratio, void=False, mask=None,
accuracy=0.001):
""" Find the height of a given material or void volume ratio
Alias for :func:`~slippy.surface.get_height_of_mat_vr` function
"""
if mask is None:
mask = self.mask
return get_height_of_mat_vr(ratio, self.profile, void=void, mask=mask,
accuracy=accuracy)
def get_summit_curvature(self, summits=None, mask=None,
filter_cut_off=None, four_nearest=False):
""" Get summit curvatures
Alias for :func:`~slippy.surface.get_summit_curvature` function
"""
if mask is None:
mask = self.mask
return get_summit_curvatures(self.profile, summits=summits, mask=mask,
filter_cut_off=filter_cut_off,
four_nearest=four_nearest, grid_spacing=self.grid_spacing)
def find_summits(self, mask=None, four_nearest=False, filter_cut_off=None,
invert=False):
""" Find summits after low pass filtering
Alias for :func:`~slippy.surface.find_summits` function
"""
if mask is None:
mask = self.mask
if invert:
return find_summits(self.profile * -1,
grid_spacing=self.grid_spacing, mask=mask,
four_nearest=four_nearest,
filter_cut_off=filter_cut_off)
else:
return find_summits(self, mask=mask, four_nearest=four_nearest,
filter_cut_off=filter_cut_off)
def low_pass_filter(self, cut_off_freq, return_copy=False):
""" Low pass FIR filter the surface profile
Alias for :func:`~slippy.surface.low_pass_filter` function
"""
if return_copy:
return low_pass_filter(self, cut_off_freq)
else:
self.profile = low_pass_filter(self, cut_off_freq)
def resample(self, new_grid_spacing=None, return_profile=False, remake_interpolator=False):
""" Resample or crop the profile by interpolation
Parameters
----------
new_grid_spacing : float, optional (None)
The grid spacing on the new surface, if the grid_spacing is not set on the current surface it is assumed to
be 1
return_profile : bool, optional (False)
If true the interpolated profile is returned otherwise it is set as the profile of the instance
remake_interpolator : bool, optional (False)
If true any memoized interpolator will be deleted and remade based on the current profile before
interpolation, see notes.
Returns
-------
new_profile : array
If return_profile is True the interpolated profile is returned
See Also
--------
rotate
fill_holes
surface_like
Notes
-----
On the first call this function will make an interpolator object which
is used to interpolate, on subsequent calls this object is found and
used resulting in no loss of quality. If the remake_interpolator key
word is set to true this interpolator is remade. This will result in a
loss of quality for subsequent calls but is necessary if the profile
property has changed.
This method does not support masking.
The profile should have nan or inf values removed by the fill_holes
method before running this
Examples
--------
>>> import numpy as np
>>> import slippy.surface as s
>>> profile=np.random.normal(size=(101,101))
>>> my_surface=s.assurface(profile, grid_spacing=1)
>>> # interpolate on a coarse grid:
>>> my_surface.resample(10)
>>> # check shape:
>>> my_surface.shape
(11,11)
>>> # restore original profile:
>>> my_surface.resample(1)
>>> my_surface.shape
(101,101)
"""
gs_changed = False
if self.grid_spacing is None:
gs_changed = True
self.grid_spacing = 1
if remake_interpolator or self._inter_func is None:
self._original_extent = self.extent
x0 = np.arange(0, self.extent[0], self.grid_spacing)
y0 = np.arange(0, self.extent[1], self.grid_spacing)
self._inter_func = scipy.interpolate.RectBivariateSpline(x0, y0, self.profile)
x1 = np.arange(0, self._original_extent[0], new_grid_spacing)
y1 = np.arange(0, self._original_extent[1], new_grid_spacing)
new_profile = self._inter_func(x1, y1)
if gs_changed:
del self.grid_spacing
if return_profile:
return new_profile
else:
self.profile = new_profile
if not gs_changed:
self.grid_spacing = new_grid_spacing
def __add__(self, other):
if not isinstance(other, _Surface):
return Surface(profile=self.profile + other, grid_spacing=self.grid_spacing)
if self.grid_spacing is not None and other.grid_spacing is not None and self.grid_spacing != other.grid_spacing:
if self.grid_spacing < other.grid_spacing:
prof_2 = other.resample(self.grid_spacing, return_profile=True)
prof_1 = self.profile
new_gs = self.grid_spacing
else:
prof_1 = self.resample(other.grid_spacing, return_profile=True)
prof_2 = other.profile
new_gs = other.grid_spacing
else:
prof_1 = self.profile
prof_2 = other.profile
if self.grid_spacing is not None:
new_gs = self.grid_spacing
else:
new_gs = other.grid_spacing
new_shape = [min(p1s, p2s) for p1s, p2s in zip(prof_1.shape, prof_2.shape)]
new_profile = prof_1[0:new_shape[0], 0:new_shape[1]] + prof_2[0:new_shape[0], 0:new_shape[1]]
return Surface(profile=new_profile, grid_spacing=new_gs)
def __mul__(self, other):
if isinstance(other, Number):
return Surface(profile=self.profile*other, grid_spacing=self.grid_spacing)
else:
raise NotImplementedError("Multiplication not implement for Surfaces unless other parameter is number")
def __div__(self, other):
if isinstance(other, Number):
return Surface(profile=self.profile/other, grid_spacing=self.grid_spacing)
else:
raise NotImplementedError("Division not implement for Surfaces unless other parameter is number")
def __sub__(self, other):
if not isinstance(other, _Surface):
return Surface(profile=self.profile - other, grid_spacing=self.grid_spacing)
if self.grid_spacing is not None and other.grid_spacing is not None and self.grid_spacing != other.grid_spacing:
if self.grid_spacing < other.grid_spacing:
prof_2 = other.resample(self.grid_spacing, return_profile=True)
prof_1 = self.profile
new_gs = self.grid_spacing
else:
prof_1 = self.resample(other.grid_spacing, return_profile=True)
prof_2 = other.profile
new_gs = other.grid_spacing
else:
prof_1 = self.profile
prof_2 = other.profile
if self.grid_spacing is not None:
new_gs = self.grid_spacing
else:
new_gs = other.grid_spacing
new_shape = [min(p1s, p2s) for p1s, p2s in zip(prof_1.shape, prof_2.shape)]
new_profile = prof_1[0:new_shape[0], 0:new_shape[1]] - prof_2[0:new_shape[0], 0:new_shape[1]]
return Surface(profile=new_profile, grid_spacing=new_gs)
def __eq__(self, other):
if not isinstance(other, _Surface) or self.is_discrete != other.is_discrete:
return False
if self.is_discrete:
return np.array_equal(self.profile, other.profile) and self.grid_spacing == other.grid_spacing
else:
return repr(self) == repr(other)
def show(self, property_to_plot: typing.Union[str, typing.Sequence[str]] = 'profile',
plot_type: typing.Union[str, typing.Sequence[str]] = 'default', ax=False, *, dist=None, stride=None,
**figure_kwargs):
""" Plot surface properties
Parameters
----------
property_to_plot : str or list of str length N optional ('profile')
The property to be plotted see notes for supported names
plot_type : str or list of str length N optional ('default')
The type of plot to be produced, see notes for supported types
ax : matplotlib axes or False optional (False)
If supplied the plot will be added to the axis
dist : a scipy probability distribution, optional (None)
Only used if probplot is requested, the probability distribution
to plot against
stride : float, optional (None)
Only used if a wire frame plot is requested, the stride between
wires
figure_kwargs : optional (None)
Keyword arguments sent to the figure function in matplotlib
Returns
-------
ax : matplotlib axes or list of matplotlib axes length N
The axis with the plot
See Also
--------
get_fft
get_psd
get_acf
ACF
Notes
-----
If fft, psd or acf are requested the field of the surface is filled
by the relevant get_ method before plotting.
The grid spacing attribute should be set before plotting
2D and 1D plots can be produced. 2D properties are:
- profile - surface profile
- unworn_profile - the surface profile with no wear applied
- fft2d - fft of the surface profile
- psd - power spectral density of the surface profile
- acf - auto correlation function of the surface
- apsd - angular power spectral density of the profile
Plot types allowed for 2D plots are:
- surface (default)
- image
- mesh
If a mesh plot is requested the distance between lines in the mesh can
be specified with the stride keyword
1D properties are:
- histogram - histogram of the profile heights
- fft1d - 1-dimensional fft of the surface
- qq - quantile-quantile plot of the surface heights
If qq or dist hist are requested the distribution to be plotted against
the height values can be specified by the dist keyword
Each of the 1D properties can only be plotted on its default plot type
Examples
--------
>>> # show the surface profile as an image:
>>> import slippy.surface as s
>>> import numpy as np
>>> my_surface=s.assurface(np.random.rand(10,10))
>>> my_surface.show('profile', 'image')
>>> # show the 2D fft of the surface profile with a range of plot types
>>> my_surface.show(['fft2D','fft2D','fft2D'], ['mesh', 'image', 'default'])
"""
import matplotlib.pyplot as plt
# noinspection PyUnresolvedReferences
from mpl_toolkits.mplot3d import Axes3D # noqa: F401
from scipy.stats import probplot
if self.profile is None:
raise AttributeError('The profile of the surface must be set before it can be shown')
if self.grid_spacing is None:
raise AttributeError("The grid spacing of the surface must be set before it can be shown")
types2d = ['profile', 'fft2d', 'psd', 'acf', 'apsd', 'unworn_profile']
types1d = ['histogram', 'fft1d', 'qq', 'hist']
# using a recursive call to deal with multiple plots on the same fig
if isinstance(property_to_plot, Sequence) and not isinstance(property_to_plot, str):
number_of_subplots = len(property_to_plot)
if not type(ax) is bool:
msg = ("Can't plot multiple plots on single axis, "
'making new figure')
warnings.warn(msg)
if isinstance(plot_type, Sequence) and not isinstance(plot_type, str):
plot_type = list(plot_type)
if len(plot_type) < number_of_subplots:
plot_type.extend(['default'] * (number_of_subplots - len(plot_type)))
else:
plot_type = [plot_type, ] * number_of_subplots
# 11, 12, 13, 22, then filling up rows of 3 (unlikely to be used)
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
if len(property_to_plot) < 5:
n_cols = [1, 2, 3, 2][number_of_subplots - 1]
else:
n_cols = 3
n_rows = int(np.ceil(number_of_subplots / 3))
fig = plt.figure(**figure_kwargs)
ax = []
sub_plot_number = 100 * n_rows + 10 * n_cols + 1
for i in range(number_of_subplots):
if property_to_plot[i].lower() in types2d and not plot_type[i] in ('image', 'default'):
ax.append(fig.add_subplot(sub_plot_number + i, projection='3d'))
else:
ax.append(fig.add_subplot(sub_plot_number + i))
self.show(property_to_plot[i], plot_type[i], ax[i])
return fig, ax
#######################################################################
# main method
#######################################################################
# 2D plots
try:
property_to_plot = property_to_plot.lower()
except AttributeError:
msg = "Property to plot must be a string or a list of strings"
raise ValueError(msg)
if not (property_to_plot in types2d or property_to_plot in types1d):
msg = ('Unsupported property to plot see documentation for details'
', type given: \n' + str(property_to_plot) + ' \nsupported ty'
'pes: \n' + ' '.join(types2d + types1d))
raise ValueError(msg)
if not ax:
fig = plt.figure(**figure_kwargs)
if property_to_plot in types2d:
if not ax and (plot_type == 'image' or plot_type == 'default'):
# noinspection PyUnboundLocalVariable
ax = fig.add_subplot(111)
elif not ax:
# noinspection PyUnboundLocalVariable
ax = fig.add_subplot(111, projection='3d')
if property_to_plot == 'profile':
labels = ['Surface profile', 'x', 'y', 'Height']
x = self.grid_spacing * np.arange(self.shape[0])
y = self.grid_spacing * np.arange(self.shape[1])
z = self.profile
elif property_to_plot == 'unworn_profile':
labels = ['Surface profile (unworn)', 'x', 'y', 'Height']
x = self.grid_spacing * np.arange(self.shape[0])
y = self.grid_spacing * np.arange(self.shape[1])
z = self.unworn_profile
elif property_to_plot == 'fft2d':
labels = ['Fourier transform of surface', 'u', 'v', '|F(x)|']
if self.fft is None:
self.get_fft()
z = np.abs(np.fft.fftshift(self.fft))
x = np.fft.fftfreq(self.shape[0], self.grid_spacing)
y = np.fft.fftfreq(self.shape[1], self.grid_spacing)
elif property_to_plot == 'psd':
labels = ['Power spectral density', 'u', 'v', 'Power/ frequency']
if self.psd is None:
self.get_psd()
# noinspection PyTypeChecker
z = np.log(np.abs(np.fft.fftshift(self.psd)))
x = np.fft.fftfreq(self.shape[0], self.grid_spacing)
y = np.fft.fftfreq(self.shape[1], self.grid_spacing)
elif property_to_plot == 'acf':
labels = ['Auto correlation function', 'x', 'y',
'Surface auto correlation']
if self.acf is None:
self.get_acf()
# noinspection PyTypeChecker
z = np.abs(np.asarray(self.acf))
x = self.grid_spacing * np.arange(self.shape[0])
y = self.grid_spacing * np.arange(self.shape[1])
x = x - max(x) / 2
y = y - max(y) / 2
elif property_to_plot == 'apsd':
labels = ['Angular power spectral density', 'x', 'y']
if self.fft is None:
self.get_fft()
p_area = (self.shape[0] - 1) * (self.shape[1] - 1) * self.grid_spacing ** 2
z = self.fft * np.conj(self.fft) / p_area
x = self.grid_spacing * np.arange(self.shape[0])
y = self.grid_spacing * np.arange(self.shape[1])
x = x - max(x) / 2
y = y - max(y) / 2
else:
raise ValueError("Property not recognised")
mesh_x, mesh_y = np.meshgrid(x, y)
if plot_type == 'surface':
ax.plot_surface(mesh_x, mesh_y, np.transpose(z))
# plt.axis('equal')
ax.set_zlabel(labels[3])
elif plot_type == 'mesh':
if property_to_plot == 'psd' or property_to_plot == 'fft2d':
mesh_x, mesh_y = | np.fft.fftshift(mesh_x) | numpy.fft.fftshift |
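# Aside (not from the module above): np.fft.fftshift only reorders the FFT
# output so the zero-frequency bin sits at the centre of the grid, which is
# what the fft2d/psd image and mesh plots above rely on.
def _fftshift_demo():
    import numpy as np
    freqs = np.fft.fftfreq(4)        # array([ 0.  ,  0.25, -0.5 , -0.25])
    return np.fft.fftshift(freqs)    # array([-0.5 , -0.25,  0.  ,  0.25])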
import pandas as pd
import joblib
import numpy as np
import argparse
import os
# Inputs:
# --sct_train_file: Pickle file that was holds the a list of the dataset used for training.
# Can be downloaded at: https://github.com/sct-data/deepseg_sc_models
# train_valid_test column: 1 for training, 2 for validating, 3 for testing
# --bids_datasets_list: List of dataset folders to gather list of subjects from.
# 1 or more (e.g. sct-testing-large spine-generic-multi-subject etc.)
# --ofolder: Folder to save the output .joblib file
# Example usage:
# python3 create_training_joblib --sct_train_file ~/dataset.pkl --bids_datasets_list ~/datasets/testing-large
# --ofolder ~/train_new_model
#
# <NAME> 2021
def create_new_joblib(dataset_sct_file, input_bids_folders, outputFolder):
## Load the merged participants.tsv
#merged_folder = '/home/nas/Consulting/ivado-project/Datasets/merged_SCTLARGE_MULTISUBJECT/'
#df_merged = bids.BIDS(merged_folder).participants.content
# Merge multiple .tsv files into the same dataframe
df_merged = pd.read_table(os.path.join(input_bids_folders[0], 'participants.tsv'), encoding="ISO-8859-1")
# Convert to string to get rid of potential TypeError during merging within the same column
df_merged = df_merged.astype(str)
# Add the Bids_path to the dataframe
df_merged['bids_path'] = [input_bids_folders[0]] * len(df_merged)
for iFolder in range(1, len(input_bids_folders)):
df_next = pd.read_table(os.path.join(input_bids_folders[iFolder], 'participants.tsv'), encoding="ISO-8859-1")
df_next = df_next.astype(str)
df_next['bids_path'] = [input_bids_folders[iFolder]] * len(df_next)
# Merge the .tsv files (This keeps also non-overlapping fields)
df_merged = pd.merge(left=df_merged, right=df_next, how='outer')
dataUsedOnSct = pd.read_pickle(dataset_sct_file)
# Force the subjects that were used for testing for SCT models to be used for testing in the new .joblib
subjectsUsedForTesting = dataUsedOnSct[dataUsedOnSct['train_valid_test'] == 3]['subject'].to_list()
# Use 60% for training/validation and 40% for testing
percentage_train = 0.4
percentage_validation = 0.2
# Whatever was used in sct testing, will stay in the testing side of the joblib as well
test = df_merged[ | np.in1d(df_merged['data_id'], subjectsUsedForTesting) | numpy.in1d |