prompt (stringlengths 19–879k) | completion (stringlengths 3–53.8k) | api (stringlengths 8–59) |
---|---|---|
import numpy as np
import random
from tqdm import tqdm
from collections import defaultdict
import os
from sklearn.cluster import KMeans
os.environ['JOBLIB_TEMP_FOLDER'] = '/tmp' # default runs out of space for parallel processing
class TaskGenerator(object):
def __init__(self, num_samples_per_class, args):
self.num_samples_per_class = num_samples_per_class
self.args = args
def make_unsupervised_dataset(self, data, partition, true_labels):
"""
Make an unsupervised dataset by associating the predicted labels with the corresponding images, sampling a fixed
number of elements per class and ordering the data by label
"""
new_labels = [-1] * len(data)
for idx, cluster in enumerate(list(partition.values())):
if len(cluster) > self.num_samples_per_class:
cluster = sorted(random.sample(cluster, self.num_samples_per_class))
for img in cluster:
new_labels[img] = list(partition.keys())[idx]
empty_indices = np.argwhere(np.asarray(new_labels) == -1).flatten()
new_data = np.delete(data, empty_indices, axis=0)
new_true_labels = np.delete(true_labels, empty_indices, axis=0)  # api: numpy.delete
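# Illustrative-only sketch (not part of the dataset): a toy run of the same
# label-assignment logic make_unsupervised_dataset implements above, with a
# made-up partition of 6 images into 2 clusters capped at 2 samples each.
import numpy as np
import random
toy_data = np.arange(6).reshape(6, 1)            # 6 dummy "images"
toy_partition = {0: [0, 1, 2], 1: [3, 4, 5]}     # predicted cluster -> image indices
num_samples_per_class = 2
toy_labels = [-1] * len(toy_data)
for label, cluster in toy_partition.items():
    if len(cluster) > num_samples_per_class:
        cluster = sorted(random.sample(cluster, num_samples_per_class))
    for img in cluster:
        toy_labels[img] = label
empty = np.argwhere(np.asarray(toy_labels) == -1).flatten()
print(np.delete(toy_data, empty, axis=0), np.delete(np.asarray(toy_labels), empty, axis=0))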
import warnings
import numpy as np
from cosymlib.tools import element_mass
def get_mass(symbols):
mass_vector = []
for symbol in symbols:
try:
mass_vector.append(element_mass(symbol))
except KeyError as e:
warnings.warn('Atomic mass of element {} not found, using 1 u'.format(e))
mass_vector.append(1.0)
return mass_vector
def get_center_mass(symbols, coordinates):
mass_vector = get_mass(symbols)
cbye = [np.dot(mass_vector[i], coordinates[i])  # api: numpy.dot
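# Minimal, self-contained sketch of the centre-of-mass formula the truncated
# get_center_mass above is building: sum_i(m_i * r_i) / sum_i(m_i). The water
# geometry and masses below are illustrative assumptions only.
import numpy as np
masses = np.array([15.999, 1.008, 1.008])                 # O, H, H (u)
coords = np.array([[0.0, 0.0, 0.0],
                   [0.76, 0.59, 0.0],
                   [-0.76, 0.59, 0.0]])
print(np.dot(masses, coords) / masses.sum())              # mass-weighted mean position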
from typing import Any
from typing import Tuple
from typing import List
from typing import Union
from typing import Sequence
from typing import Optional
from typing_extensions import Annotated
from nptyping import NDArray
from dataclasses import dataclass
from dataclasses import astuple
from numpy import asarray
from numpy import float64
from numpy import zeros_like
from scipy.sparse import diags
from compas.numerical import connectivity_matrix
from .result import Result
@dataclass
class FDNumericalData:
"""Stores numerical data used by the force density algorithms."""
free: int
fixed: int
xyz: NDArray[(Any, 3), float64]
C: NDArray[(Any, Any), int]
q: NDArray[(Any, 1), float64]
Q: NDArray[(Any, Any), float64]
p: NDArray[(Any, 1), float64]
A: NDArray[(Any, Any), float64]
Ai: NDArray[(Any, Any), float64]
Af: NDArray[(Any, Any), float64]
forces: NDArray[(Any, 1), float64] = None
lengths: NDArray[(Any, 1), float64] = None
residuals: NDArray[(Any, 3), float64] = None
tangent_residuals: NDArray[(Any, 3), float64] = None
normal_residuals: NDArray[(Any, 1), float64] = None
def __iter__(self):
return iter(astuple(self))
@classmethod
def from_params(cls,
vertices: Union[Sequence[Annotated[List[float], 3]], NDArray[(Any, 3), float64]],
fixed: List[int],
edges: List[Tuple[int, int]],
forcedensities: List[float],
loads: Optional[Union[Sequence[Annotated[List[float], 3]], NDArray[(Any, 3), float64]]] = None):
"""Construct numerical arrays from force density solver input parameters."""
free = list(set(range(len(vertices))) - set(fixed))
xyz = asarray(vertices, dtype=float64)  # api: numpy.asarray
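# Hedged sketch of how the remaining force-density arrays are typically
# assembled (a plain scipy stand-in, not compas.numerical.connectivity_matrix):
# C is the branch-node connectivity matrix, Q = diag(q), and A = C^T Q C.
import numpy as np
from scipy.sparse import coo_matrix, diags
toy_edges = [(0, 1), (1, 2)]
toy_q = np.array([1.0, 1.0])
m, n = len(toy_edges), 3
rows = np.repeat(np.arange(m), 2)
cols = np.array(toy_edges).ravel()
vals = np.tile([-1.0, 1.0], m)
C = coo_matrix((vals, (rows, cols)), shape=(m, n)).tocsr()   # connectivity
Q = diags(toy_q)                                             # force densities on the diagonal
A = C.T @ Q @ C                                              # stiffness-like matrix used by the solver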
'''
Define optical components for gtrace.
'''
#{{{ Import modules
import numpy as np
pi = np.pi
array = np.array
sqrt = np.lib.scimath.sqrt
from numpy.linalg import norm
from traits.api import HasTraits, Int, Float, CFloat, CArray, List, Str
import gtrace.optics as optics
import gtrace.optics.geometric
from .unit import *
import copy
import gtrace.draw as draw
#}}}
#{{{ Author and License Information
#Copyright (c) 2011-2021, <NAME>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__author__ = "<NAME>"
__copyright__ = "Copyright 2011-2021, Yoichi Aso"
__credits__ = ["Yoichi Aso"]
__license__ = "BSD"
__version__ = "0.2.1"
__maintainer__ = "<NAME>"
__email__ = "yo<EMAIL>"
__status__ = "Beta"
#}}}
#{{{ Generic Optics Class
class Optics(HasTraits):
'''
A general optics class from which other specific
optics classes are derived.
Attributes
----------
name : str
Name of the optics.
center : array
Center position of the optics. array of shape(2,).
rotationAngle : float
This angle defines the orientation of the optics.
'''
name = Str()
center = CArray(dtype=np.float64, shape=(2,))
rotationAngle = CFloat(0.0) #in rad
#{{{ isHit(beam)
def isHit(self, beam):
'''
A function to see if a beam hits this optics or not.
Parameters
----------
beam : gtrace.beam.GaussianBeam
A GaussianBeam object to be interacted by the optics.
Returns
-------
Dict
The return value is a dictionary with the following keys:
``isHit, position, distance, face``
``isHit``:
This is a boolean to answer whether the beam hit the optics
or not.
``position``:
A numpy array containing the coordinate values of the intersection
point between the beam and the optics. If isHit is False, this parameter
does not mean anything.
``distance``:
The distance between the beam origin and the intersection point.
``face``:
An optional string identifying which face of the optics was hit.
For example, ``face`` can be either "HR" or "AR" for a mirror.
``face`` can also be "side", meaning that the beam hits a side
of the optics, which is not meant to be used, e.g. the side of a mirror.
In this case, the beam has reached a dead end.
'''
#This is an abstract function
return {'isHit': False, 'position': np.array((0,0)),
'distance': 0.0, 'face':''}
#}}}
#{{{ hit(beam, order=0, threshold=0.0):
def hit(self, beam, order=0, threshold=0.0):
'''
A function to hit the optics with a beam.
This function attempts to hit the optics with the source beam, ``beam``.
Parameters
----------
beam : gtrace.beam.GaussianBeam
A GaussianBeam object to be interacted by the optics.
order : int, optional
An integer to specify how many times the internal reflections
are computed.
Defaults 0.
threshold : float, optional
The power threshold for internal reflection calculation.
If the power of an auxiliary beam falls below this threshold,
further propagation of this beam will not be performed.
Defaults 0.0.
Returns
-------
tuple of (boolean, dict, str)
``(isHit, beamDict, face)``
``isHit``
This is a boolean to answer whether the beam hit the optics
or not.
``beamDict``
A dictionary containing resultant beams.
``face``:
An optional string identifying which face of the optics was hit.
For a mirror, ``face`` is any of "HR", "AR" or "side".
'''
#This is an abstract function
return (False, {}, "side")
#}}}
#{{{ _isHitSurface_()
def _isHitSurface_(self, beam, surface_center, normal_vector,
surface_size=1.0, inv_ROC=0.0):
'''
Determine if a beam hit a surface
Parameters
----------
beam : gtrace.beam.GaussianBeam
A GaussianBeam object to be interacted by the optics.
Returns
-------
ans : dict
A dictionary with the following keys:
"isHit": A boolean value whether the beam hit the surface or not.
"Intersection Point": numpy array of the coordinates of the intersection point.
"distance": Distance between the origin of the beam and the intersection point.
"localNormVect": A numpy array representing the normal vector of the surface
at the intersection point.
"localNormAngle": The angle of the localNormVect.
'''
if np.abs(inv_ROC) < 1e-5:
ans = optics.geometric.line_plane_intersection(pos=beam.pos, dirVect=beam.dirVect,
plane_center=surface_center, normalVector=normal_vector,
diameter=surface_size)
localNormVect = normal_vector
localNormAngle = np.mod(np.arctan2(localNormVect[1],
localNormVect[0]), 2*pi)
ans['localNormVect'] = localNormVect
ans['localNormAngle'] = localNormAngle
return ans
else:
ans = optics.geometric.line_arc_intersection(pos=beam.pos, dirVect=beam.dirVect,
chord_center=surface_center,
chordNormVect=normal_vector,
invROC=inv_ROC,
diameter=surface_size)
return ans
#}}}
#}}}
#{{{ Mirror Class
class Mirror(Optics):
'''
Representing a partial reflective mirror.
Attributes
----------
curve_direction : str
Either 'h' or 'v'. If 'h', the mirror is curved in the horizontal plane; if 'v', in the vertical plane.
HRcenter : array
The position of the center of the arc of the HR surface. shape(2,).
HRcenterC : array
The position of the center of the chord of the HR surface. shape(2,).
normVectHR : array
Normal vector of the HR surface. shape(2,)
normAngleHR : float
Angle of the HR normal vector. In radians.
ARcenter : array
The position of the center of the AR surface. shape(2,)
normVectAR : array
Normal vector of the AR surface. shape(2,)
normAngleAR : float
Angle of the AR normal vector. In radians.
HRtransmissive : boolean
A boolean value that defaults to False. If True, this mirror
is supposed to transmit beams on the HR surface. Therefore,
the first encounter of a beam with the HR surface of this mirror
will not increase the stray_order. This flag should be set to True for
beam splitters and input test masses.
term_on_HR : boolean
If this is True, a beam with stray_order <= self.term_on_HR_order will be terminated when
it hits the HR surface. This is to avoid an infinite loop of non-sequential
trace by forming a cavity.
term_on_HR_order : int
Integer to specify the upper limit of the stray order used to judge
whether to terminate the non-sequential trace or not on HR reflection.
'''
#{{{ Traits definitions
HRcenter = CArray(dtype=np.float64, shape=(2,))
HRcenterC = CArray(dtype=np.float64, shape=(2,))
sagHR = CFloat()
normVectHR = CArray(dtype=np.float64, shape=(2,))
normAngleHR = CFloat()
ARcenter = CArray(dtype=np.float64, shape=(2,))
ARcenterC = CArray(dtype=np.float64, shape=(2,))
sagAR = CFloat()
normVectAR = CArray(dtype=np.float64, shape=(2,))
normAngleAR = CFloat()
diameter = CFloat(25.0*cm) #
ARdiameter = CFloat()
thickness = CFloat(15.0*cm) #
wedgeAngle = CFloat(0.25*pi/180) # in rad
n = CFloat(1.45) #Index of refraction
inv_ROC_HR = CFloat(1.0/7000.0) #Inverse of the ROC of the HR surface.
inv_ROC_AR = CFloat(0.0) #Inverse of the ROC of the AR surface.
Refl_HR = CFloat(99.0) #Power reflectivity of the HR side.
Trans_HR = CFloat(1.0) #Power transmittance of the HR side.
Refl_AR = CFloat(0.01) #Power reflectivity of the AR side.
Trans_AR = CFloat(99.99) #Power transmittance of the AR side.
#}}}
#{{{ __init__
def __init__(self, HRcenter=[0.0,0.0], normAngleHR=0.0,
normVectHR=None, diameter=25.0*cm, thickness=15.0*cm,
wedgeAngle=0.25*pi/180., inv_ROC_HR=1.0/7000.0, inv_ROC_AR=0.0,
Refl_HR=0.99, Trans_HR=0.01, Refl_AR=0.01, Trans_AR=0.99, n=1.45,
name="Mirror", HRtransmissive=False, term_on_HR=False):
'''
Create a mirror object.
Parameters
----------
HRcenter : array, optional
Position of the center of the HR surface.
Defaults [0.0, 0.0].
normAngleHR : float, optional
Direction angle of the normal vector of the HR surface. In radians.
Defaults 0.0.
normVectHR : array or None, optional
Normal vector of the HR surface. Should be an array of shape(2,).
Defaults None.
diameter : float, optional
Diameter of the mirror.
Defaults 25.0*cm.
thickness : float, optional
Thickness of the mirror.
Defaults 15.0*cm.
wedgeAngle : float, optional
Wedge angle between the HR and AR surfaces. In radians.
Defaults 0.25*pi/180.
inv_ROC_HR : float, optional
1/ROC of the HR surface.
Defaults 1.0/7000.0.
inv_ROC_AR : float, optional
1/ROC of the AR surface.
Defaults 0.0.
Refl_HR : float, optional
Power reflectivity of the HR surface.
Defaults 0.99.
Trans_HR : float, optional
Power transmissivity of the HR surface.
Defaults 0.01.
Refl_AR : float, optional
Power reflectivity of the AR surface.
Defaults 0.01.
Trans_AR : float, optional
Power transmissivity of the AR surface.
Defaults 0.99.
n : float, optional
Index of refraction.
Defaults 1.45.
name : str, optional
Name of the mirror.
Defaults "Mirror".
HRtransmissive : boolean, optional
If True, this mirror
is supposed to transmit beams on the HR surface. Therefore,
the first encounter of a beam with the HR surface of this mirror
will not increase the stray_order. This flag should be set to True for
beam splitters and input test masses.
Defaults False
term_on_HR : boolean, optional
If this is True, a beam with stray_order <= self.term_on_HR_order
will be terminated when
it hits the HR surface. This is to avoid an infinite loop of
non-sequential
trace by forming a cavity.
Defaults False.
'''
self.diameter = diameter
#Compute the sag.
#Sag is positive for convex mirror.
if np.abs(inv_ROC_HR) > 1./(10*km):
R = 1./inv_ROC_HR
r = self.diameter/2
self.sagHR = - np.sign(R)*(np.abs(R) - np.sqrt(R**2 - r**2))
else:
self.sagHR = 0.0;
#Convert rotationAngle to normVectHR or vice versa.
if normVectHR is not None:
self.normVectHR = normVectHR
else:
self.normAngleHR = normAngleHR
self.HRcenter = HRcenter
self._HRcenter_changed(0,0)
self.thickness = thickness
self.wedgeAngle = wedgeAngle
self.ARdiameter = self.diameter/np.cos(self.wedgeAngle)
self.inv_ROC_HR = inv_ROC_HR
self.inv_ROC_AR = inv_ROC_AR
self.Refl_HR = Refl_HR
self.Trans_HR = Trans_HR
self.Refl_AR = Refl_AR
self.Trans_AR = Trans_AR
self.n = n
self._normAngleHR_changed(0,0)
self.name = name
self.HRtransmissive = HRtransmissive
self.term_on_HR = term_on_HR
self.term_on_HR_order = 0
#}}}
#{{{ copy
def copy(self):
return Mirror(HRcenter=self.HRcenter, normAngleHR=self.normAngleHR,
diameter=self.diameter, thickness=self.thickness,
wedgeAngle=self.wedgeAngle, inv_ROC_HR=self.inv_ROC_HR,
inv_ROC_AR=self.inv_ROC_AR, Refl_HR=self.Refl_HR,
Trans_HR=self.Trans_HR, Refl_AR=self.Refl_AR, Trans_AR=self.Trans_AR,
n=self.n, name=self.name, HRtransmissive=self.HRtransmissive,
term_on_HR=self.term_on_HR)
#}}}
#{{{ get_side_info
def get_side_info(self):
'''
Return information on the sides of the mirror.
Returned value is a list of two tuples like [(center1, normVect1, length1), (center2, normVect2, length2)]
Each tuple corresponds to a side. center1 is the coordinates of the center of the side line. normVect1 is the normal vector of the side line. length1 is the length of the side line.
Returns
-------
[(float, float, float), (float, float, float)]
'''
r = self.diameter/2
v1h = np.array([self.thickness/2, r])
v1a = np.array([-self.thickness/2 - r*np.tan(self.wedgeAngle), r])
v1h = optics.geometric.vector_rotation_2D(v1h, self.normAngleHR) + self.center
v1a = optics.geometric.vector_rotation_2D(v1a, self.normAngleHR) + self.center
center1 = (v1h + v1a)/2
vn1 = optics.geometric.vector_rotation_2D(v1h - v1a, pi/2)
normVect1 = vn1/np.linalg.norm(vn1)
length1 = np.linalg.norm(v1h - v1a)
v2h = np.array([self.thickness/2, -r])
v2a = np.array([-self.thickness/2 + r*np.tan(self.wedgeAngle), -r])
v2h = optics.geometric.vector_rotation_2D(v2h, self.normAngleHR) + self.center
v2a = optics.geometric.vector_rotation_2D(v2a, self.normAngleHR) + self.center
center2 = (v2h + v2a)/2
vn2 = optics.geometric.vector_rotation_2D(v2h - v2a, -pi/2)
normVect2 = vn2/np.linalg.norm(vn2)
length2 = np.linalg.norm(v2h - v2a)
return [(center1, normVect1, length1), (center2, normVect2, length2)]
#}}}
#{{{ rotate
def rotate(self, angle, center=False):
'''
Rotate the mirror. If center is not specified, the center of rotation is
HRcenter. If center is given (as a vector), the center of rotation is
center. center is a position vector in the global coordinates.
Parameters
----------
angle : float
Angle of rotation.
center: array or boolean, optional
Center of rotation, or False.
'''
if center:
center = np.array(center)
pointer = self.HRcenter - center
pointer = optics.geometric.vector_rotation_2D(pointer, angle)
self.HRcenter = center + pointer
self.normAngleHR = self.normAngleHR + angle
#}}}
#{{{ Translate
def translate(self, trVect):
trVect = np.array(trVect)
self.center = self.center + trVect
#}}}
#{{{ Draw
def draw(self, cv, drawName=False):
'''
Draw itself
'''
plVect = optics.geometric.vector_rotation_2D(self.normVectHR, pi/2)
p1 = self.HRcenterC + plVect * self.diameter/2
p2 = p1 - plVect * self.diameter
p3 = p2 - self.normVectHR * (self.thickness - np.tan(self.wedgeAngle)*self.diameter/2)
p4 = p1 - self.normVectHR * (self.thickness + np.tan(self.wedgeAngle)*self.diameter/2)
cv.add_shape(draw.Line(p2,p3), layername="Mirrors")
cv.add_shape(draw.Line(p4,p1), layername="Mirrors")
d = self.thickness/10
l1 = p1 - self.normVectHR * d
l2 = p2 - self.normVectHR * d
cv.add_shape(draw.Line(l1,l2), layername="Mirrors")
#Draw Curved surface
#HR
if np.abs(self.inv_ROC_HR) > 1.0/1e5:
R = 1/self.inv_ROC_HR
theta = np.arcsin(self.diameter/2/R)
sag = R*(1-np.cos(theta))
x = np.linspace(0, self.diameter/2, 30)  # api: numpy.linspace
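# Hedged numeric check of the sag formula used in Mirror.__init__ above, for
# an assumed mirror with ROC = 7000 m and diameter = 0.25 m:
#   sag = -sign(R) * (|R| - sqrt(R**2 - r**2))
import numpy as np
R, r = 7000.0, 0.25 / 2.0
sag = -np.sign(R) * (np.abs(R) - np.sqrt(R**2 - r**2))
print(sag)   # roughly -1.1e-06 m, i.e. about a micrometre of surface depth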
import copy
import numpy as np
import torch
class Memory:
def __init__(self, memory_size, nb_total_classes, rehearsal, fixed=True):
self.memory_size = memory_size
self.nb_total_classes = nb_total_classes
self.rehearsal = rehearsal
self.fixed = fixed
self.x = self.y = self.t = None
self.nb_classes = 0
@property
def memory_per_class(self):
if self.fixed:
return self.memory_size // self.nb_total_classes
return self.memory_size // self.nb_classes if self.nb_classes > 0 else self.memory_size
def get_dataset(self, base_dataset):
dataset = copy.deepcopy(base_dataset)
dataset._x = self.x
dataset._y = self.y
dataset._t = self.t
return dataset
def get(self):
return self.x, self.y, self.t
def __len__(self):
return len(self.x) if self.x is not None else 0
def save(self, path):
np.savez(
path,
x=self.x, y=self.y, t=self.t
)
def load(self, path):
data = np.load(path)
self.x = data["x"]
self.y = data["y"]
self.t = data["t"]
assert len(self) <= self.memory_size, len(self)
self.nb_classes = len(np.unique(self.y))
def reduce(self):
x, y, t = [], [], []
for class_id in np.unique(self.y):
indexes = np.where(self.y == class_id)[0]
x.append(self.x[indexes[:self.memory_per_class]])
y.append(self.y[indexes[:self.memory_per_class]])
t.append(self.t[indexes[:self.memory_per_class]])
self.x = np.concatenate(x)
self.y = np.concatenate(y)
self.t = np.concatenate(t)
def add(self, dataset, model, nb_new_classes):
self.nb_classes += nb_new_classes
x, y, t = herd_samples(dataset, model, self.memory_per_class, self.rehearsal)
#assert len(y) == self.memory_per_class * nb_new_classes, (len(y), self.memory_per_class, nb_new_classes)
if self.x is None:
self.x, self.y, self.t = x, y, t
else:
if not self.fixed:
self.reduce()
self.x = np.concatenate((self.x, x))
self.y = np.concatenate((self.y, y))  # api: numpy.concatenate
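# Hypothetical sizing check for the Memory class above (the rehearsal string
# is a placeholder): with a fixed budget of 2000 exemplars over 100 total
# classes, memory_per_class is 2000 // 100 = 20; with fixed=False and only 10
# classes seen so far, it would be 2000 // 10 = 200.
mem = Memory(memory_size=2000, nb_total_classes=100, rehearsal='closest', fixed=True)
print(mem.memory_per_class)      # -> 20
mem_dyn = Memory(memory_size=2000, nb_total_classes=100, rehearsal='closest', fixed=False)
mem_dyn.nb_classes = 10
print(mem_dyn.memory_per_class)  # -> 200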
##
# Copyright: Copyright (c) MOSEK ApS, Denmark. All rights reserved.
#
# File: total_variation.py
#
# Purpose: Demonstrates how to solve a total
# variation problem using the Fusion API.
##
import sys
import mosek
from mosek.fusion import *
import numpy as np
def total_var(n,m,f,sigma):
with Model('TV') as M:
u= M.variable( [n+1,m+1], Domain.inRange(0.,1.0) )
t= M.variable( [n,m], Domain.unbounded() )
ucore= u.slice( [0,0], [n,m] )
deltax= Expr.sub( u.slice( [1,0], [n+1,m] ), ucore)
deltay= Expr.sub( u.slice( [0,1], [n,m+1] ), ucore)
M.constraint( Expr.stack(2, t, deltax, deltay), Domain.inQCone().axis(2) )
fmat = Matrix.dense(n,m,f)
M.constraint( Expr.vstack(sigma, Expr.flatten( Expr.sub( fmat, ucore ) ) ),
Domain.inQCone() )
M.objective( ObjectiveSense.Minimize, Expr.sum(t) )
M.setLogHandler(sys.stdout)
M.solve()
return ucore.level()
#Display
def show(n,m,grid):
try:
import matplotlib
matplotlib.use('Agg') #Remove to go interactive
import matplotlib.pyplot as plt
import matplotlib.cm as cm
plt.imshow(np.reshape(grid, (n,m))  # api: numpy.reshape
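# Hypothetical driver for the total-variation model above (assumes a working
# MOSEK installation and licence); the image size and noise level are made up.
if __name__ == '__main__':
    n, m, sigma = 50, 50, 1.0
    f = np.random.uniform(0.0, 1.0, n * m)   # noisy pixel values, row-major
    u = total_var(n, m, f, sigma)
    print(u[:5])                             # first few recovered pixel values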
import numpy as np
import pandas as pd
import GPy, GPyOpt
from sklearn.model_selection import train_test_split
from sklearn.metrics import brier_score_loss as brier_score
from sklearn.metrics import accuracy_score, f1_score
from scipy.sparse import load_npz
from stuff.models import NBSVM, simpleNBSVM
from stuff.tools import tfidf_to_counts
from stuff.metrics import binary_diagnostics
# Importing the data
filedir = 'C:/data/addm/'
seeds = np.array(pd.read_csv(filedir + 'seeds.csv')).flatten()
corpus = pd.read_csv(filedir + 'corpus_with_lemmas_clean.csv')
doctermat = load_npz(filedir + 'doctermat.npz')
# Setting the features and targets
X = tfidf_to_counts(np.array(doctermat.todense(),
dtype=np.uint16))
y = np.array(corpus.aucaseyn, dtype=np.uint8)
n_range = range(corpus.shape[0])
# Toggle for the optimization loop
optimize = False
opt_iter = 30
if optimize:
# Regular function for hyperparameter evaluation
def evaluate_hps(beta, C):
mod = NBSVM(C=C, beta=beta)
mod.fit(X[train], y[train])
guesses = mod.predict(X[val]).flatten()
final_score = 1 - accuracy_score(y[val], guesses)
params = np.array([beta, C])
print('Params were ' + str(params))
print('Error was ' + str(final_score) + '\n')
return final_score
# Bounds for the GP optimizer
bounds = [{'name': 'beta',
'type': 'continuous',
'domain': (0.8, 1.0)},
{'name': 'C',
'type': 'discrete',
'domain': (0.001, 0.01, 1.0, 2, 2**2)}
]
# Function for GPyOpt to optimize
def f(x):
print(x)
eval = evaluate_hps(beta=float(x[:, 0]),
C=float(x[:, 1]))
return eval
# Running the optimization
train, val = train_test_split(n_range,
test_size=0.3,
stratify=y,
random_state=10221983)
opt_mod = GPyOpt.methods.BayesianOptimization(f=f,
num_cores=20,
domain=bounds,
initial_design_numdata=5)
opt_mod.run_optimization(opt_iter)
best = opt_mod.x_opt
# Saving the best parameters to CSV
pd.Series(best).to_csv(filedir + 'models/best_nbsvm_params.csv',
index=False)
# Running the splits
stats = pd.DataFrame(np.zeros([10, 15])  # api: numpy.zeros
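# Hedged toy illustration of the same GPyOpt pattern on a 1-D quadratic,
# independent of the NBSVM data above (the bound name is arbitrary).
toy_bounds = [{'name': 'x', 'type': 'continuous', 'domain': (-3.0, 3.0)}]
toy_opt = GPyOpt.methods.BayesianOptimization(
    f=lambda x: (x - 1.0) ** 2,        # minimum at x = 1
    domain=toy_bounds,
    initial_design_numdata=5)
toy_opt.run_optimization(15)
print(toy_opt.x_opt)                    # should approach [1.0]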
import csv
import sys
import os
import numpy as np
import torch
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
import pandas as pd
import traceback
def get_csv_data(csv_file, labels, space_separated=False):
data, all_labels = get_csv_data_and_labels(csv_file,
space_separated=space_separated)
# # Uncomment to print the labels
# print(csv_file)
# for label in all_labels:
# print(label)
# print('***\n'*3)
n_data = data.shape[0]
new_data = np.zeros((len(labels), n_data))
for ll, name in enumerate(labels):
if name in all_labels:
idx = all_labels.index(name)
try:
new_data[ll, :] = data[:, idx]
except Exception:
print(traceback.format_exc())
print("Error with data in %s" % csv_file)
sys.exit(1)
else:
raise ValueError("Label '%s' not available in file '%s'"
% (name, csv_file))
return new_data
def get_csv_data_and_labels(csv_file, space_separated=False):
# Read from CSV file
try:
if space_separated:
series = pd.read_csv(csv_file, delim_whitespace=True)
else:
series = pd.read_csv(csv_file)
except Exception:
print(traceback.format_exc())
print("Error reading %s" % csv_file)
sys.exit(1)
data = series.values
labels = list(series)
return data, labels
def plot_intentions_eval_returns(csv_file, block=False, num_intentions=None):
if num_intentions is None:
num_intentions = 0
# else:
# num_intentions += 1
labels = list()
# Add Intentional-Unintentional Label
label = 'Test Returns Mean'
for uu in range(num_intentions):
new_string = label + (' [%02d]' % uu)
labels.append(new_string)
# Assuming the Main does not have a prefix
labels.append(label)
data = get_csv_data(csv_file, labels)
fig, axs = subplots(num_intentions + 1)
if not isinstance(axs, np.ndarray):
axs = np.array([axs])
fig.subplots_adjust(hspace=0)
fig.suptitle('Average Return', fontweight='bold')
for aa, ax in enumerate(axs):
ax.plot(data[aa])
ax.set_ylabel(labels[aa])
plt.setp(ax.get_xticklabels(), visible=False)
axs[-1].set_xlabel('Episodes')
plt.setp(axs[-1].get_xticklabels(), visible=True)
print('total_iters:', len(data[-1]))
plt.show(block=block)
def plot_intentions_info(csv_file, num_intentions=None, block=False):
infos_to_plot = [
'Alpha',
'Entropy',
]
adim = get_max_action_idx(csv_file, "Mean Action ") + 1
for ii in range(adim):
infos_to_plot.append('Std Action %02d' % ii)
# for ii in range(adim):
# infos_to_plot.append('Mean Action %02d' % ii)
for info in infos_to_plot:
plot_intention_info(csv_file, info, plot_label=None, per_action=False,
num_intentions=num_intentions, block=block)
def plot_intention_info(csv_file, csv_label, plot_label=None, per_action=False,
num_intentions=None, block=False):
if num_intentions is None:
num_intentions = 0
if plot_label is None:
plot_label = csv_label
# Add Intentional-Unintentional Label
csv_labels = []
plot_labels = []
for uu in range(num_intentions):
csv_labels.append(csv_label + (' [U-%02d]' % uu))
plot_labels.append(plot_label + (' [U-%02d]' % uu))
# Assuming the Main does not have a prefix
csv_labels.append(csv_label)
plot_labels.append(plot_label)
data = get_csv_data(csv_file, csv_labels)
if per_action:
subplots_shape = (num_intentions + 1)
else:
subplots_shape = (num_intentions + 1)
fig, axs = subplots(subplots_shape)
if not isinstance(axs, np.ndarray):
axs = np.array([axs])
fig.subplots_adjust(hspace=0)
fig.suptitle(csv_label, fontweight='bold')
for aa, ax in enumerate(axs):
ax.plot(data[aa])
ax.set_ylabel(plot_labels[aa])
plt.setp(ax.get_xticklabels(), visible=False)
axs[-1].set_xlabel('Episodes')
plt.setp(axs[-1].get_xticklabels(), visible=True)
plt.show(block=block)
def plot_contours(ax, x_tensor, y_tensor, values):
contours = ax.contour(x_tensor, y_tensor, values, 20,
colors='dimgray')
ax.clabel(contours, inline=1, fontsize=10, fmt='%.0f')
ax.imshow(values, extent=(x_tensor.min(), x_tensor.max(),
y_tensor.min(), y_tensor.max()),
origin='lower', alpha=0.5)
def plot_q_values(qf, action_lower, action_higher, obs, policy=None,
obs_dims=(0, 1), action_dims=(0, 1), delta=0.01,
device='cpu'):
# Values Plots
num_intentions = 2
action_dim_x = action_dims[0]
action_dim_y = action_dims[1]
obs_dim_x = obs_dims[0]
obs_dim_y = obs_dims[1]
x_min = action_lower[action_dim_x]
y_min = action_lower[action_dim_y]
x_max = action_higher[action_dim_x]
y_max = action_higher[action_dim_y]
action_dim = len(action_lower)
all_x = torch.arange(float(x_min), float(x_max), float(delta))
all_y = torch.arange(float(y_min), float(y_max), float(delta))
x_mesh, y_mesh = torch.meshgrid(all_x, all_y)
x_mesh = x_mesh.t()
y_mesh = y_mesh.t()
all_acts = torch.stack((x_mesh, y_mesh), dim=-1)
fig, all_axs = \
subplots(1, num_intentions + 1,
gridspec_kw={'wspace': 0, 'hspace': 0},
)
# fig.suptitle('Q-val Observation: ' + str(ob))
fig.tight_layout()
fig.canvas.set_window_title('q_vals_%1d_%1d' %
(obs[obs_dim_x], obs[obs_dim_y]))
all_axs = np.atleast_1d(all_axs)  # api: numpy.atleast_1d
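# Hypothetical usage of get_csv_data defined above: write a tiny progress file
# and pull one column back out by its label (the file path is a placeholder).
if __name__ == '__main__':
    demo = pd.DataFrame({'Test Returns Mean': [1.0, 2.0, 3.0],
                         'Entropy': [0.5, 0.4, 0.3]})
    demo.to_csv('/tmp/progress_demo.csv', index=False)
    returns = get_csv_data('/tmp/progress_demo.csv', ['Test Returns Mean'])
    print(returns)   # -> [[1. 2. 3.]]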
#!/usr/bin/env python
import numpy as np
from numpy import linalg as LA
from collections import Counter
class CoulombDescriptor:
def __init__(self, descriptor):
self.descriptor = descriptor
def ConstructDescriptor(self, InputMat, mat_Z = None):
if self.descriptor == 'Coulomb2Eigval':
return self.Coulomb2Eigval(InputMat)
elif self.descriptor == 'XYZnuc2CoulombMat':
return self.XYZnuc2CoulombMat(InputMat, mat_Z)
elif self.descriptor == 'BagOfAtoms':
Z_i = self.getNuclearCharges(InputMat)
Atoms = self.CountAtoms(Z_i)
Diag = self.Coulomb2Diagonal(InputMat)
BoA = self.Diagonal2BoA(Diag, Z_i, Atoms)
return BoA
elif self.descriptor == 'BagOfBonds':
Z_i = self.getNuclearCharges(InputMat)
ZiZj = self.getOffdiagonal(Z_i)
Bonds = self.CountBonds(ZiZj)
BoB = self.Coulomb2BoB(InputMat, ZiZj, Bonds)
return BoB
elif self.descriptor == 'BagOfAtomsBonds':
Z_i = self.getNuclearCharges(InputMat)
Atoms = self.CountAtoms(Z_i)
Diag = self.Coulomb2Diagonal(InputMat)
BoA = self.Diagonal2BoA(Diag, Z_i, Atoms)
ZiZj = self.getOffdiagonal(Z_i)
Bonds = self.CountBonds(ZiZj)
BoB = self.Coulomb2BoB(InputMat, ZiZj, Bonds)
return np.concatenate((BoA, BoB), axis = 1)
def get_CoulombMat(self, atom_xs, Zmat):
m = len(Zmat)
Cmat = np.zeros((m,m))  # api: numpy.zeros
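# Self-contained sketch of the standard Coulomb-matrix formula the truncated
# get_CoulombMat above presumably fills in (an assumption, not the author's code):
#   C_ii = 0.5 * Z_i**2.4 ,   C_ij = Z_i * Z_j / |R_i - R_j|  for i != j
def _coulomb_matrix_sketch(xyz, Z):
    m = len(Z)
    C = np.zeros((m, m))
    for i in range(m):
        for j in range(m):
            if i == j:
                C[i, j] = 0.5 * Z[i] ** 2.4
            else:
                C[i, j] = Z[i] * Z[j] / LA.norm(np.asarray(xyz[i]) - np.asarray(xyz[j]))
    return C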
import tensorflow as tf
from tensorflow import keras
import numpy as np
import random
import cv2
class Agent:
ACTION_SPACE = [(0, 0.1), (-1, 0.1), (1, 0.1)]
def __init__(self):
# -- Basic settings related to the environment --
self.num_actions = 3
# -- TD trainer options --
self.learning_rate = 0.0001 # learning rate
self.momentum = 0.8 # momentum
self.batch_size = 4 # batch size
self.decay = 0.00001 # learning rate decay
# -- other options --
self.gamma = 0.9 # future discount for reward
self.epsilon = 0.20 # epsilon during training
self.epsilon_decay = 0.99
self.min_epsilon = 0.05
self.start_learn_threshold = 20 # minimum number of examples in replay memory before learning
self.experience_size = 1000 # size of replay memory
self.learning_steps_burnin = 20 # number of random actions the agent takes before learning
self.learning_steps_total = 10000 # number of training iterations
# -- Buffered model --
self._model = self._build_model()
# -- Replay memory --
self._memory = [[] for _ in range(self.experience_size)]
# -- Agent counter
self.step = 0
# loss record
self.loss_rec = []
def _build_model(self):
model = keras.Sequential()
model.add(keras.layers.InputLayer(input_shape=(60, 240, 1)))
model.add(keras.layers.Conv2D(8, (3, 3), padding='same'))
model.add(keras.layers.Activation('relu'))
model.add(keras.layers.MaxPooling2D(pool_size=(2, 2), padding='valid'))
model.add(keras.layers.Dropout(0.25))
model.add(keras.layers.Conv2D(16, (3, 3), padding='same'))
model.add(keras.layers.Activation('relu'))
model.add(keras.layers.MaxPooling2D(pool_size=(2, 2), padding='valid'))
model.add(keras.layers.Dropout(0.25))
model.add(keras.layers.Conv2D(32, (3, 3), padding='same'))
model.add(keras.layers.Activation('relu'))
model.add(keras.layers.MaxPooling2D(pool_size=(2, 2), padding='valid'))
model.add(keras.layers.Dropout(0.25))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(32))
model.add(keras.layers.Activation('relu'))
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.Dense(self.num_actions, activation='linear'))
model.compile(loss='mean_squared_error', optimizer=keras.optimizers.Adam(lr=self.learning_rate, decay=self.decay))
model.summary()
return model
def _forward(self, state):
prediction = self._model.predict(state)
return prediction
def _backward(self, state, target, epochs=1):
history = self._model.fit(state, target, epochs=epochs, verbose=0)
self.loss_rec.append(history.history['loss'][-1])
if self.step % 1000 == 0:
np.save('./_out/online_loss.npy', self.loss_rec)
return history.history['loss'][-1]
def _remember(self, state, action, reward, next_state, debug=False):
index = self.step % self.experience_size
self._memory[index] = (state, action, reward, next_state)
if debug:
mean, stdv = [120.9934, 18.8303]
state = (state * stdv) + mean
state = state.astype(int)
cv2.imwrite('./_debug/{0:0>5}-{1}-{2}.png'.format(self.step, action, reward), state)
# cv2.imwrite('./_debug/' + str(self.step) + '_' + str(action) + '_' + str(reward) + '.png', state)
# cv2.imwrite('./_debug/' + str(self.step) + '_next' + '.png', next_state)
def learn(self, state, action, reward, next_state):
self._remember(state, action, reward, next_state)
if self.step >= self.start_learn_threshold:
minibatch = random.sample(self._memory[0:self.step], self.batch_size)
states = np.array([dp[0] for dp in minibatch])
actions = np.array([dp[1] for dp in minibatch])
rewards = np.array([dp[2] for dp in minibatch])
next_states = np.array([dp[3] for dp in minibatch])
q_hats = self._forward(next_states)
td_targets = rewards + self.gamma * np.amax(q_hats, axis=1)  # max over the action axis, one target per sample
predictions = self._forward(states)
for i in range(len(predictions)):
predictions[i][actions[i]] = td_targets[i]
self._backward(states, predictions)
self.step += 1
def act(self, state):
self.epsilon = np.max([self.epsilon * self.epsilon_decay, self.min_epsilon])
if self.step < self.learning_steps_burnin or np.random.rand() < self.epsilon:
return random.randint(0, self.num_actions - 1)
else:
act_values = self._forward(np.array([state]))
print(act_values)
return np.argmax(act_values)
def act_greedy(self, state):
""" Always act based on the output of NN
"""
act_values = self._forward(np.array([state]))
print('Greedy: {}'.format(act_values))
return np.argmax(act_values)
def act_with_guidence(self, state, cte):
self.epsilon = np.max([self.epsilon * self.epsilon_decay, self.min_epsilon])
if self.step < self.learning_steps_burnin or np.random.rand() < self.epsilon:
print('guiding')
if abs(cte) < 1.5:
return 0
elif cte > 0:
return 1
else:
return 2
else:
act_values = self._forward(np.array([state]))
print(act_values)
return np.argmax(act_values)
def get_reward(self, cte):
if abs(cte) >= 2.5:
return -100
else:
return 1
def load(self, name):
self._model.load_weights(name)
def save(self, name):
self._model.save_weights(name)
def save_memory(self, name):
width, height = 240, 60
serialized = []
for index, m in enumerate(self._memory):
print(m[0].shape)
s_state = np.reshape(m[0], [width * height])
s_next_state = np.reshape(m[3], [width * height])
serialized.append(np.concatenate([s_state, [m[1]], [m[2]], s_next_state]))
serialized = np.array(serialized)
print('saved memory successfully')
np.save(name, serialized)
def read_memory(self, name):
width, height = 240, 60
l = width * height
serialized = np.load(name)  # api: numpy.load
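# Hedged toy check of the TD-target arithmetic used in learn() above, with
# made-up numbers: target = reward + gamma * max_a' Q(s', a').
toy_reward, toy_gamma = 1.0, 0.9
toy_q_next = np.array([0.3, 2.5, -0.1])                 # Q(s', a) for the 3 actions
print(toy_reward + toy_gamma * np.amax(toy_q_next))     # 1 + 0.9 * 2.5 = 3.25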
# from .image_extractor import extract_features
import numpy as np
from PIL import Image, ImageDraw, ImageColor
import consistent_points as cp
import transformations_models as tm
import ransac
def extract_files(file_path):
file = open(file_path, 'r')
coordinates = []
features = []
counter = 0
for line in file:
if counter >= 2:
elements = line.split(' ')
coordinates.append(np.array(elements[0:2], dtype=float))  # np.float / np.int aliases were removed in NumPy >= 1.24
features.append(np.array(elements[5:], dtype=int))
counter += 1
file.close()
return np.array(coordinates), np.array(features)
def find_key_points(image1_features, image2_features):
image1_neighbours = np.array(list(map(lambda image1_feature:
cp.find_nearest_neighbours(image1_feature, image2_features)[0],
image1_features)))
image2_neighbours = np.array(list(map(lambda image2_feature:
cp.find_nearest_neighbours(image2_feature, image1_features)[0],
image2_features)))
enumerated = list(enumerate(image1_neighbours)) # (index, neighbour_index)
indexes = np.array(list(filter(lambda img1_neighbour_index:
img1_neighbour_index[0] == image2_neighbours[img1_neighbour_index[1]],
enumerated)))
return np.array([image1_features[indexes[:, 0]], image2_features[indexes[:, 1]]]), indexes
def draw_lines(coordinates_img1, coordinates_img2, A = None, color='red'):
img1 = Image.open(first_image_path)
img2 = Image.open(second_image_path)
# img2 = img2.transform(img1.size, Image.AFFINE, A[0:2].flatten())
concat = Image.fromarray(np.hstack((np.array(img1), np.array(img2))))
d = ImageDraw.Draw(concat)
for i in range(0, len(coordinates_img2)):
d.line([coordinates_img1[i][0],
coordinates_img1[i][1],
coordinates_img2[i][0] + img1.size[0],
coordinates_img2[i][1]],
fill=ImageColor.getrgb(color))
del d
concat.show()
def error_fn(pairs, transformed_pairs, min_inliners):
res = np.linalg.norm(pairs - transformed_pairs, axis=1)
return np.nonzero(res <= min_inliners)[0]
def transformation_fn(points, transformation_params):
A = tm.affine_transformation(transformation_params)
return A, (A @ np.insert(points.transpose(), 2, 0, axis=0))[0:2].transpose()
first_image_path = '../files/extracted/img3.png'
second_image_path = '../files/extracted/img2.png'
if __name__ == '__main__':
coordinates_image1, features_image1 = extract_files(first_image_path + '.haraff.sift')
coordinates_image2, features_image2 = extract_files(second_image_path + '.haraff.sift')
# swap elements to prevent IndexError
# always image1 will be the one with more points
if coordinates_image1.size < coordinates_image2.size:
temp = coordinates_image1, features_image1
coordinates_image1, features_image1 = coordinates_image2, features_image2
coordinates_image2, features_image2 = temp
temp = first_image_path
first_image_path = second_image_path
second_image_path = temp
key_points_features, key_points_indexes = find_key_points(features_image1, features_image2)
key_points_coordinates = np.array([
coordinates_image1[key_points_indexes[:, 0]], coordinates_image2[key_points_indexes[:, 1]]])
# draw before algorithms
draw_lines(
key_points_coordinates[0],
key_points_coordinates[1],
color='blue'
)
number_of_neighbours = int(15 * np.ceil(key_points_coordinates.shape[1] / 100)) # 15 neighbours per 100 key points
min_compability = int(np.ceil(40 / 100 * number_of_neighbours)  # api: numpy.ceil
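# Worked example of the neighbourhood sizes computed above, for a hypothetical
# run with 230 matched key points.
toy_n_keypoints = 230
toy_neighbours = int(15 * np.ceil(toy_n_keypoints / 100))        # -> 45
toy_min_compability = int(np.ceil(40 / 100 * toy_neighbours))    # -> 18
print(toy_neighbours, toy_min_compability)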
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Deep Recurrent Reinforcement Learning: 1 LSTM layer and 4 Dense layers, tanh activation function, 12 episodes, 50 iterations
drnnLSTMtanhMakespan0=[799, 798, 799, 799, 805, 806, 799, 805, 805, 800, 798, 798]
drnnLSTMtanhMakespan1=[800, 798, 796, 800, 796, 794, 795, 798, 800, 798, 805, 798]
drnnLSTMtanhMakespan2=[796, 800, 798, 804, 800, 798, 798, 798, 800, 800, 802, 797]
drnnLSTMtanhMakespan3=[805, 800, 800, 803, 794, 802, 800, 798, 799, 804, 799, 806]
drnnLSTMtanhMakespan4=[796, 798, 795, 798, 796, 799, 800, 796, 796, 798, 806, 800]
drnnLSTMtanhMakespan5=[798, 798, 799, 800, 800, 808, 798, 798, 801, 796, 799, 798]
drnnLSTMtanhMakespan6=[800, 796, 805, 798, 798, 796, 799, 800, 803, 800, 798, 800]
drnnLSTMtanhMakespan7=[799, 805, 802, 805, 800, 799, 800, 799, 805, 800, 794, 796]
drnnLSTMtanhMakespan8=[799, 798, 800, 798, 798, 800, 800, 800, 804, 799, 800, 804]
drnnLSTMtanhMakespan9=[795, 800, 795, 796, 798, 796, 797, 800, 797, 798, 796, 795]
drnnLSTMtanhMakespan10=[804, 799, 805, 798, 798, 798, 805, 800, 796, 804, 796, 799]
drnnLSTMtanhMakespan11=[795, 803, 805, 798, 795, 801, 798, 798, 804, 803, 799, 804]
drnnLSTMtanhMakespan12=[798, 798, 799, 800, 798, 798, 799, 799, 801, 796, 799, 798]
drnnLSTMtanhMakespan13=[798, 798, 799, 797, 796, 796, 800, 797, 805, 800, 800, 794]
drnnLSTMtanhMakespan14=[800, 798, 798, 796, 800, 800, 798, 798, 802, 798, 802, 798]
drnnLSTMtanhMakespan15=[796, 796, 800, 801, 800, 800, 796, 794, 796, 800, 796, 798]
drnnLSTMtanhMakespan16=[798, 798, 795, 797, 795, 799, 800, 796, 795, 796, 800, 800]
drnnLSTMtanhMakespan17=[794, 795, 800, 798, 795, 796, 798, 796, 795, 794, 798, 796]
drnnLSTMtanhMakespan18=[797, 795, 794, 794, 800, 796, 796, 795, 798, 795, 798, 794]
drnnLSTMtanhMakespan19=[797, 795, 795, 796, 798, 799, 795, 799, 795, 794, 795, 795]
drnnLSTMtanhMakespan20=[796, 794, 798, 797, 798, 799, 795, 795, 797, 795, 795, 792]
drnnLSTMtanhMakespan21=[797, 795, 797, 793, 794, 794, 800, 794, 798, 795, 797, 795]
drnnLSTMtanhMakespan22=[794, 800, 798, 795, 795, 796, 796, 799, 795, 794, 795, 795]
drnnLSTMtanhMakespan23=[795, 795, 794, 795, 794, 794, 797, 799, 796, 794, 794, 795]
drnnLSTMtanhMakespan24=[798, 795, 795, 795, 792, 794, 795, 794, 794, 795, 795, 795]
drnnLSTMtanhMakespan25=[794, 792, 794, 795, 795, 794, 794, 794, 794, 795, 794, 793]
drnnLSTMtanhMakespan26=[794, 794, 795, 796, 798, 795, 794, 794, 794, 794, 795, 794]
drnnLSTMtanhMakespan27=[795, 794, 795, 795, 795, 794, 794, 794, 794, 794, 795, 795]
drnnLSTMtanhMakespan28=[795, 794, 794, 795, 794, 795, 795, 795, 795, 794, 795, 794]
drnnLSTMtanhMakespan29=[792, 794, 795, 794, 794, 795, 794, 793, 795, 794, 795, 792]
drnnLSTMtanhMakespan30=[795, 794, 795, 795, 794, 794, 794, 795, 794, 794, 794, 794]
drnnLSTMtanhMakespan31=[794, 794, 795, 794, 795, 793, 795, 795, 795, 792, 794, 794]
drnnLSTMtanhMakespan32=[795, 795, 794, 793, 795, 795, 795, 795, 794, 794, 795, 794]
drnnLSTMtanhMakespan33=[793, 794, 795, 793, 792, 795, 794, 794, 794, 794, 794, 795]
drnnLSTMtanhMakespan34=[794, 795, 795, 794, 794, 794, 794, 793, 794, 794, 794, 794]
drnnLSTMtanhMakespan35=[794, 794, 797, 793, 792, 794, 793, 794, 795, 794, 795, 792]
drnnLSTMtanhMakespan36=[794, 794, 793, 794, 795, 797, 795, 795, 794, 795, 793, 794]
drnnLSTMtanhMakespan37=[795, 793, 795, 794, 795, 798, 795, 794, 795, 793, 795, 794]
drnnLSTMtanhMakespan38=[794, 795, 793, 795, 794, 794, 794, 794, 794, 794, 797, 795]
drnnLSTMtanhMakespan39=[794, 794, 795, 794, 795, 795, 794, 795, 794, 795, 798, 797]
drnnLSTMtanhMakespan40=[795, 795, 794, 795, 794, 795, 795, 794, 794, 794, 795, 795]
drnnLSTMtanhMakespan41=[794, 795, 792, 794, 794, 798, 795, 794, 794, 794, 793, 795]
drnnLSTMtanhMakespan42=[793, 795, 794, 793, 794, 794, 792, 794, 795, 794, 794, 793]
drnnLSTMtanhMakespan43=[793, 792, 793, 794, 794, 795, 792, 794, 795, 794, 795, 794]
drnnLSTMtanhMakespan44=[793, 794, 795, 795, 794, 794, 795, 798, 794, 792, 795, 794]
drnnLSTMtanhMakespan45=[795, 794, 794, 794, 794, 792, 794, 795, 794, 796, 795, 794]
drnnLSTMtanhMakespan46=[794, 793, 793, 795, 795, 794, 794, 794, 794, 796, 794, 794]
drnnLSTMtanhMakespan47=[794, 794, 795, 794, 794, 795, 792, 795, 794, 795, 795, 794]
drnnLSTMtanhMakespan48=[794, 795, 794, 794, 794, 792, 794, 795, 796, 794, 794, 795]
drnnLSTMtanhMakespan49=[794, 794, 794, 794, 794, 794, 792, 794, 793, 794, 795, 794]
drnnLSTMtanhRewards0=[-0.1759911894273128, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.177078750549934, -0.17725973169122497, -0.1759911894273128, -0.177078750549934, -0.177078750549934, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765]
drnnLSTMtanhRewards1=[-0.17617264919621228, -0.17580964970257765, -0.17544633017412387, -0.17617264919621228, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17580964970257765, -0.17580964970257765]
drnnLSTMtanhRewards2=[-0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.1768976897689769, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17653532907770195, -0.17562802996914942]
drnnLSTMtanhRewards3=[-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17671654929577466, -0.17508269018743108, -0.17653532907770195, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.1768976897689769, -0.1759911894273128, -0.17725973169122497]
drnnLSTMtanhRewards4=[-0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17725973169122497, -0.17617264919621228]
drnnLSTMtanhRewards5=[-0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.1776214552648934, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765]
drnnLSTMtanhRewards6=[-0.17617264919621228, -0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.1759911894273128, -0.17617264919621228, -0.17671654929577466, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228]
drnnLSTMtanhRewards7=[-0.1759911894273128, -0.177078750549934, -0.17653532907770195, -0.177078750549934, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.1759911894273128, -0.177078750549934, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387]
drnnLSTMtanhRewards8=[-0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.1759911894273128, -0.17617264919621228, -0.1768976897689769]
drnnLSTMtanhRewards9=[-0.17526455026455026, -0.17617264919621228, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17562802996914942, -0.17617264919621228, -0.17562802996914942, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026]
drnnLSTMtanhRewards10=[-0.1768976897689769, -0.1759911894273128, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17617264919621228, -0.17544633017412387, -0.1768976897689769, -0.17544633017412387, -0.1759911894273128]
drnnLSTMtanhRewards11=[-0.17526455026455026, -0.17671654929577466, -0.177078750549934, -0.17580964970257765, -0.17526455026455026, -0.1763540290620872, -0.17580964970257765, -0.17580964970257765, -0.1768976897689769, -0.17671654929577466, -0.1759911894273128, -0.1768976897689769]
drnnLSTMtanhRewards12=[-0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.1763540290620872, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765]
drnnLSTMtanhRewards13=[-0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17562802996914942, -0.17544633017412387, -0.17544633017412387, -0.17617264919621228, -0.17562802996914942, -0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108]
drnnLSTMtanhRewards14=[-0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17653532907770195, -0.17580964970257765, -0.17653532907770195, -0.17580964970257765]
drnnLSTMtanhRewards15=[-0.17544633017412387, -0.17544633017412387, -0.17617264919621228, -0.1763540290620872, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17508269018743108, -0.17544633017412387, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765]
drnnLSTMtanhRewards16=[-0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.17617264919621228, -0.17617264919621228]
drnnLSTMtanhRewards17=[-0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387]
drnnLSTMtanhRewards18=[-0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108]
drnnLSTMtanhRewards19=[-0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards20=[-0.17544633017412387, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17471872931833224]
drnnLSTMtanhRewards21=[-0.17562802996914942, -0.17526455026455026, -0.17562802996914942, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026]
drnnLSTMtanhRewards22=[-0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17544633017412387, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards23=[-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.1759911894273128, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drnnLSTMtanhRewards24=[-0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards25=[-0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221]
drnnLSTMtanhRewards26=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards27=[-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards28=[-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards29=[-0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224]
drnnLSTMtanhRewards30=[-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnLSTMtanhRewards31=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnLSTMtanhRewards32=[-0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards33=[-0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drnnLSTMtanhRewards34=[-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnLSTMtanhRewards35=[-0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17471872931833224, -0.1749007498897221, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224]
drnnLSTMtanhRewards36=[-0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108]
drnnLSTMtanhRewards37=[-0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards38=[-0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026]
drnnLSTMtanhRewards39=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942]
drnnLSTMtanhRewards40=[-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards41=[-0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026]
drnnLSTMtanhRewards42=[-0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221]
drnnLSTMtanhRewards43=[-0.1749007498897221, -0.17471872931833224, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards44=[-0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards45=[-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards46=[-0.17508269018743108, -0.1749007498897221, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108]
drnnLSTMtanhRewards47=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards48=[-0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drnnLSTMtanhRewards49=[-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
# Deep Recurrent Reinforcement Learning: 1 LSTM layer and 4 Dense layers, relu activation function, 12 episodes, 50 iterations
drnnLSTMreluMakespan0=[805, 800, 800, 800, 794, 800, 798, 809, 795, 800, 798, 798]
drnnLSTMreluMakespan1=[798, 798, 796, 799, 800, 796, 796, 798, 798, 794, 798, 800]
drnnLSTMreluMakespan2=[805, 805, 798, 799, 806, 799, 806, 799, 800, 798, 805, 795]
drnnLSTMreluMakespan3=[800, 800, 800, 796, 800, 800, 799, 806, 808, 798, 797, 798]
drnnLSTMreluMakespan4=[805, 805, 795, 796, 799, 804, 798, 794, 798, 794, 796, 810]
drnnLSTMreluMakespan5=[798, 798, 798, 795, 800, 798, 796, 802, 800, 800, 805, 801]
drnnLSTMreluMakespan6=[800, 798, 798, 795, 800, 796, 800, 798, 799, 796, 805, 800]
drnnLSTMreluMakespan7=[800, 800, 800, 799, 798, 798, 800, 805, 800, 799, 800, 801]
drnnLSTMreluMakespan8=[799, 800, 800, 799, 795, 795, 805, 795, 798, 800, 798, 800]
drnnLSTMreluMakespan9=[800, 796, 805, 798, 798, 795, 805, 800, 799, 795, 800, 805]
drnnLSTMreluMakespan10=[805, 798, 805, 800, 801, 805, 799, 805, 798, 800, 800, 798]
drnnLSTMreluMakespan11=[798, 803, 800, 797, 795, 796, 794, 799, 800, 800, 800, 796]
drnnLSTMreluMakespan12=[799, 798, 799, 795, 798, 795, 798, 798, 798, 795, 798, 798]
drnnLSTMreluMakespan13=[798, 798, 799, 796, 798, 796, 800, 799, 796, 794, 796, 795]
drnnLSTMreluMakespan14=[796, 798, 806, 799, 804, 798, 805, 798, 800, 805, 794, 800]
drnnLSTMreluMakespan15=[806, 795, 800, 796, 798, 796, 810, 798, 799, 798, 800, 800]
drnnLSTMreluMakespan16=[799, 796, 798, 798, 798, 800, 798, 810, 796, 805, 800, 795]
drnnLSTMreluMakespan17=[798, 798, 798, 794, 798, 805, 801, 798, 800, 799, 798, 798]
drnnLSTMreluMakespan18=[795, 800, 794, 798, 797, 798, 794, 800, 797, 796, 794, 794]
drnnLSTMreluMakespan19=[798, 802, 794, 798, 799, 795, 797, 795, 800, 796, 797, 796]
drnnLSTMreluMakespan20=[794, 797, 795, 794, 799, 795, 795, 795, 800, 797, 794, 798]
drnnLSTMreluMakespan21=[799, 798, 796, 795, 794, 798, 795, 795, 798, 798, 795, 794]
drnnLSTMreluMakespan22=[794, 794, 795, 797, 795, 795, 795, 792, 794, 795, 794, 794]
drnnLSTMreluMakespan23=[794, 794, 794, 794, 795, 796, 793, 794, 795, 794, 797, 795]
drnnLSTMreluMakespan24=[794, 792, 792, 794, 796, 792, 794, 795, 794, 792, 796, 795]
drnnLSTMreluMakespan25=[794, 795, 795, 794, 794, 792, 795, 792, 795, 794, 794, 794]
drnnLSTMreluMakespan26=[795, 794, 794, 795, 794, 794, 793, 794, 797, 795, 794, 795]
drnnLSTMreluMakespan27=[794, 794, 795, 796, 795, 797, 794, 794, 795, 801, 794, 795]
drnnLSTMreluMakespan28=[795, 795, 795, 795, 794, 792, 794, 797, 794, 795, 795, 795]
drnnLSTMreluMakespan29=[794, 792, 798, 794, 797, 795, 793, 795, 795, 794, 795, 795]
drnnLSTMreluMakespan30=[795, 794, 798, 794, 794, 795, 792, 796, 794, 796, 794, 794]
drnnLSTMreluMakespan31=[794, 795, 795, 794, 795, 794, 795, 795, 794, 794, 795, 795]
drnnLSTMreluMakespan32=[798, 794, 794, 794, 798, 792, 795, 795, 795, 796, 794, 795]
drnnLSTMreluMakespan33=[794, 796, 794, 794, 794, 795, 794, 794, 797, 793, 793, 795]
drnnLSTMreluMakespan34=[794, 794, 795, 794, 794, 793, 794, 795, 793, 795, 795, 794]
drnnLSTMreluMakespan35=[798, 796, 795, 794, 795, 795, 795, 795, 794, 795, 797, 795]
drnnLSTMreluMakespan36=[794, 796, 794, 794, 794, 794, 795, 795, 797, 796, 795, 795]
drnnLSTMreluMakespan37=[795, 794, 796, 795, 795, 795, 795, 794, 792, 797, 794, 793]
drnnLSTMreluMakespan38=[794, 798, 794, 792, 794, 792, 795, 797, 793, 794, 794, 797]
drnnLSTMreluMakespan39=[792, 794, 794, 794, 792, 795, 795, 795, 794, 794, 795, 794]
drnnLSTMreluMakespan40=[792, 795, 795, 792, 795, 795, 794, 795, 794, 795, 794, 795]
drnnLSTMreluMakespan41=[794, 797, 795, 794, 795, 795, 798, 794, 795, 796, 796, 794]
drnnLSTMreluMakespan42=[794, 795, 795, 795, 794, 795, 795, 794, 794, 795, 793, 795]
drnnLSTMreluMakespan43=[795, 794, 795, 794, 795, 795, 792, 794, 794, 795, 794, 795]
drnnLSTMreluMakespan44=[795, 794, 792, 795, 794, 794, 795, 794, 796, 795, 796, 794]
drnnLSTMreluMakespan45=[795, 794, 793, 794, 793, 795, 794, 794, 795, 794, 795, 794]
drnnLSTMreluMakespan46=[794, 796, 793, 794, 794, 795, 799, 795, 794, 794, 794, 794]
drnnLSTMreluMakespan47=[794, 794, 794, 794, 795, 793, 795, 795, 794, 795, 795, 795]
drnnLSTMreluMakespan48=[794, 794, 795, 794, 795, 795, 795, 794, 794, 795, 795, 794]
drnnLSTMreluMakespan49=[795, 795, 795, 794, 795, 795, 794, 795, 793, 793, 792, 792]
drnnLSTMreluRewards0=[-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.1778021978021978, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765]
drnnLSTMreluRewards1=[-0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17617264919621228]
drnnLSTMreluRewards2=[-0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.1759911894273128, -0.17725973169122497, -0.1759911894273128, -0.17725973169122497, -0.1759911894273128, -0.17617264919621228, -0.17580964970257765, -0.177078750549934, -0.17526455026455026]
drnnLSTMreluRewards3=[-0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17617264919621228, -0.17617264919621228, -0.1759911894273128, -0.17725973169122497, -0.1776214552648934, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765]
drnnLSTMreluRewards4=[-0.177078750549934, -0.177078750549934, -0.17526455026455026, -0.17544633017412387, -0.1759911894273128, -0.1768976897689769, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17544633017412387, -0.17798286090969018]
drnnLSTMreluRewards5=[-0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.17544633017412387, -0.17653532907770195, -0.17617264919621228, -0.17617264919621228, -0.177078750549934, -0.1763540290620872]
drnnLSTMreluRewards6=[-0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.177078750549934, -0.17617264919621228]
drnnLSTMreluRewards7=[-0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.1763540290620872]
drnnLSTMreluRewards8=[-0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.177078750549934, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228]
drnnLSTMreluRewards9=[-0.17617264919621228, -0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17526455026455026, -0.17617264919621228, -0.1759911894273128, -0.17526455026455026, -0.17617264919621228, -0.177078750549934]
drnnLSTMreluRewards10=[-0.177078750549934, -0.17580964970257765, -0.177078750549934, -0.17617264919621228, -0.1763540290620872, -0.1759911894273128, -0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765]
drnnLSTMreluRewards11=[-0.17580964970257765, -0.17671654929577466, -0.17617264919621228, -0.17562802996914942, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387]
drnnLSTMreluRewards12=[-0.1759911894273128, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765]
drnnLSTMreluRewards13=[-0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17617264919621228, -0.1759911894273128, -0.17544633017412387, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026]
drnnLSTMreluRewards14=[-0.17544633017412387, -0.17580964970257765, -0.17725973169122497, -0.1759911894273128, -0.1768976897689769, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17508269018743108, -0.17617264919621228]
drnnLSTMreluRewards15=[-0.17725973169122497, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17798286090969018, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228]
drnnLSTMreluRewards16=[-0.1759911894273128, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17798286090969018, -0.17544633017412387, -0.177078750549934, -0.17617264919621228, -0.17526455026455026]
drnnLSTMreluRewards17=[-0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.177078750549934, -0.1763540290620872, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765]
drnnLSTMreluRewards18=[-0.17526455026455026, -0.17617264919621228, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17508269018743108, -0.17617264919621228, -0.17562802996914942, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards19=[-0.17580964970257765, -0.17653532907770195, -0.17508269018743108, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17562802996914942, -0.17544633017412387]
drnnLSTMreluRewards20=[-0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17562802996914942, -0.17508269018743108, -0.17580964970257765]
drnnLSTMreluRewards21=[-0.1759911894273128, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards22=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards23=[-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026]
drnnLSTMreluRewards24=[-0.17508269018743108, -0.17471872931833224, -0.17471872931833224, -0.17508269018743108, -0.17544633017412387, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17544633017412387, -0.17526455026455026]
drnnLSTMreluRewards25=[-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards26=[-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards27=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1763540290620872, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards28=[-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards29=[-0.17508269018743108, -0.17471872931833224, -0.17580964970257765, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards30=[-0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17544633017412387, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards31=[-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards32=[-0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards33=[-0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.1749007498897221, -0.1749007498897221, -0.17526455026455026]
drnnLSTMreluRewards34=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards35=[-0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026]
drnnLSTMreluRewards36=[-0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards37=[-0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17562802996914942, -0.17508269018743108, -0.1749007498897221]
drnnLSTMreluRewards38=[-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17562802996914942, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942]
drnnLSTMreluRewards39=[-0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards40=[-0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards41=[-0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17544633017412387, -0.17508269018743108]
drnnLSTMreluRewards42=[-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026]
drnnLSTMreluRewards43=[-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards44=[-0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108]
drnnLSTMreluRewards45=[-0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards46=[-0.17508269018743108, -0.17544633017412387, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards47=[-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards48=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards49=[-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.1749007498897221, -0.17471872931833224, -0.17471872931833224]
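# The block labels in this file describe the network behind each run (one recurrent
# layer followed by four Dense layers, with tanh or relu activations). The original
# model-building code is not part of this file; the sketch below only illustrates such
# an architecture, assuming a Keras Sequential model, and state_size, n_actions and the
# layer width of 64 are hypothetical placeholders rather than the experiments' values.
def build_drnn_sketch(state_size, n_actions, recurrent_layer="LSTM", activation="relu", units=64):
    """Sketch of a '1 recurrent layer + 4 Dense layers' Q-network (hypothetical sizes)."""
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import LSTM, GRU, Dense
    rnn = LSTM if recurrent_layer == "LSTM" else GRU
    model = Sequential([
        rnn(units, input_shape=(None, state_size)),   # single recurrent layer
        Dense(units, activation=activation),          # three hidden Dense layers ...
        Dense(units, activation=activation),
        Dense(units, activation=activation),
        Dense(n_actions, activation="linear"),        # ... plus a linear output layer
    ])
    model.compile(optimizer="adam", loss="mse")
    return model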
# Deep Recurrent Reinforcement Learning: 1 GRU layer and 4 Dense layers, tanh activation function, 12 episodes, 50 iterations
drnnGRUtanhMakespan0 = [798, 799, 798, 804, 805, 799, 801, 801, 801, 799, 798, 796]
drnnGRUtanhMakespan1 = [800, 798, 798, 798, 798, 798, 801, 798, 795, 796, 800, 796]
drnnGRUtanhMakespan2 = [795, 804, 805, 800, 800, 796, 804, 800, 795, 798, 798, 801]
drnnGRUtanhMakespan3 = [806, 796, 794, 797, 798, 800, 800, 808, 805, 798, 800, 809]
drnnGRUtanhMakespan4 = [805, 801, 795, 798, 798, 800, 796, 796, 805, 798, 799, 798]
drnnGRUtanhMakespan5 = [804, 799, 798, 804, 796, 799, 798, 805, 796, 805, 798, 800]
drnnGRUtanhMakespan6 = [800, 799, 794, 801, 799, 796, 800, 804, 797, 796, 800, 798]
drnnGRUtanhMakespan7 = [798, 800, 810, 810, 805, 800, 795, 798, 800, 805, 799, 800]
drnnGRUtanhMakespan8 = [798, 797, 800, 800, 804, 805, 798, 798, 801, 795, 798, 809]
drnnGRUtanhMakespan9 = [803, 800, 800, 805, 805, 798, 804, 803, 805, 801, 810, 801]
drnnGRUtanhMakespan10 = [798, 799, 798, 798, 805, 804, 805, 798, 799, 798, 800, 800]
drnnGRUtanhMakespan11 = [796, 795, 805, 800, 800, 798, 795, 804, 805, 798, 800, 800]
drnnGRUtanhMakespan12 = [799, 799, 809, 800, 799, 799, 797, 805, 799, 800, 798, 795]
drnnGRUtanhMakespan13 = [805, 800, 800, 805, 800, 799, 798, 801, 798, 797, 805, 800]
drnnGRUtanhMakespan14 = [800, 798, 800, 800, 800, 804, 804, 799, 799, 800, 798, 798]
drnnGRUtanhMakespan15 = [805, 800, 795, 800, 804, 795, 800, 798, 799, 798, 800, 796]
drnnGRUtanhMakespan16 = [806, 795, 801, 799, 799, 796, 796, 794, 802, 796, 800, 802]
drnnGRUtanhMakespan17 = [796, 800, 798, 800, 794, 800, 804, 805, 798, 810, 800, 798]
drnnGRUtanhMakespan18 = [798, 800, 794, 794, 797, 798, 800, 805, 798, 798, 804, 798]
drnnGRUtanhMakespan19 = [796, 800, 806, 799, 796, 800, 798, 805, 798, 799, 797, 805]
drnnGRUtanhMakespan20 = [805, 800, 799, 796, 805, 805, 805, 794, 809, 796, 800, 797]
drnnGRUtanhMakespan21 = [798, 800, 800, 800, 798, 801, 796, 801, 801, 801, 795, 799]
drnnGRUtanhMakespan22 = [798, 801, 797, 800, 799, 795, 799, 799, 800, 801, 800, 799]
drnnGRUtanhMakespan23 = [800, 798, 799, 805, 794, 800, 798, 796, 796, 804, 800, 794]
drnnGRUtanhMakespan24 = [800, 800, 798, 805, 804, 799, 798, 801, 800, 798, 798, 798]
drnnGRUtanhMakespan25 = [798, 798, 798, 795, 800, 803, 798, 798, 800, 799, 796, 798]
drnnGRUtanhMakespan26 = [796, 798, 798, 798, 805, 796, 798, 798, 805, 795, 801, 796]
drnnGRUtanhMakespan27 = [794, 796, 796, 800, 800, 798, 800, 798, 802, 798, 797, 798]
drnnGRUtanhMakespan28 = [799, 799, 800, 800, 798, 802, 799, 798, 795, 795, 794, 798]
drnnGRUtanhMakespan29 = [798, 796, 796, 797, 796, 798, 800, 800, 796, 798, 800, 795]
drnnGRUtanhMakespan30 = [799, 798, 795, 795, 800, 795, 798, 798, 799, 798, 805, 799]
drnnGRUtanhMakespan31 = [795, 799, 794, 794, 796, 795, 795, 794, 798, 797, 798, 795]
drnnGRUtanhMakespan32 = [797, 798, 795, 796, 798, 795, 797, 798, 795, 794, 795, 796]
drnnGRUtanhMakespan33 = [799, 795, 794, 794, 798, 795, 798, 797, 800, 796, 795, 794]
drnnGRUtanhMakespan34 = [798, 795, 798, 796, 798, 794, 796, 798, 798, 798, 796, 797]
drnnGRUtanhMakespan35 = [795, 798, 796, 798, 794, 801, 795, 800, 795, 800, 794, 800]
drnnGRUtanhMakespan36 = [798, 799, 796, 797, 795, 794, 800, 795, 795, 794, 795, 795]
drnnGRUtanhMakespan37 = [799, 798, 795, 795, 794, 795, 795, 796, 805, 795, 798, 796]
drnnGRUtanhMakespan38 = [798, 794, 795, 795, 795, 796, 795, 796, 800, 798, 797, 796]
drnnGRUtanhMakespan39 = [794, 795, 795, 797, 795, 795, 794, 794, 798, 795, 794, 798]
drnnGRUtanhMakespan40 = [795, 795, 795, 795, 795, 795, 794, 794, 793, 797, 794, 795]
drnnGRUtanhMakespan41 = [794, 794, 795, 793, 795, 795, 792, 794, 795, 794, 794, 794]
drnnGRUtanhMakespan42 = [795, 795, 795, 796, 794, 797, 795, 795, 792, 795, 796, 793]
drnnGRUtanhMakespan43 = [794, 795, 795, 794, 795, 794, 798, 794, 797, 795, 794, 794]
drnnGRUtanhMakespan44 = [795, 795, 793, 794, 795, 794, 795, 795, 794, 794, 795, 794]
drnnGRUtanhMakespan45 = [794, 794, 794, 794, 794, 794, 795, 794, 794, 794, 796, 795]
drnnGRUtanhMakespan46 = [795, 794, 795, 794, 794, 794, 793, 794, 795, 795, 794, 797]
drnnGRUtanhMakespan47 = [794, 794, 794, 794, 795, 794, 795, 792, 794, 795, 794, 794]
drnnGRUtanhMakespan48 = [795, 794, 794, 794, 795, 798, 794, 794, 794, 795, 794, 794]
drnnGRUtanhMakespan49 = [795, 795, 794, 795, 793, 795, 796, 794, 795, 794, 794, 797]
drnnGRUtanhRewards0 = [-0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.1768976897689769, -0.177078750549934, -0.1759911894273128, -0.1763540290620872, -0.1763540290620872, -0.1763540290620872, -0.1759911894273128, -0.17580964970257765, -0.17544633017412387]
drnnGRUtanhRewards1 = [-0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872, -0.17580964970257765, -0.17526455026455026, -0.17544633017412387, -0.17617264919621228, -0.17544633017412387]
drnnGRUtanhRewards2 = [-0.17526455026455026, -0.1768976897689769, -0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.1768976897689769, -0.17617264919621228, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872]
drnnGRUtanhRewards3 = [-0.17725973169122497, -0.17544633017412387, -0.17508269018743108, -0.17562802996914942, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.1776214552648934, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.1778021978021978]
drnnGRUtanhRewards4 = [-0.177078750549934, -0.1763540290620872, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765]
drnnGRUtanhRewards5 = [-0.1768976897689769, -0.1759911894273128, -0.17580964970257765, -0.1768976897689769, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.177078750549934, -0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.17617264919621228]
drnnGRUtanhRewards6 = [-0.17617264919621228, -0.1759911894273128, -0.17508269018743108, -0.1763540290620872, -0.1759911894273128, -0.17544633017412387, -0.17617264919621228, -0.1768976897689769, -0.17562802996914942, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765]
drnnGRUtanhRewards7 = [-0.17580964970257765, -0.17617264919621228, -0.17798286090969018, -0.177078750549934, -0.17798286090969018, -0.17617264919621228, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.1759911894273128, -0.17617264919621228]
drnnGRUtanhRewards8 = [-0.17580964970257765, -0.17562802996914942, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.1763540290620872, -0.17580964970257765, -0.1778021978021978]
drnnGRUtanhRewards9 = [-0.17671654929577466, -0.17617264919621228, -0.17617264919621228, -0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.1768976897689769, -0.17671654929577466, -0.177078750549934, -0.1763540290620872, -0.17798286090969018, -0.1763540290620872]
drnnGRUtanhRewards10 = [-0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.1768976897689769, -0.177078750549934, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228]
drnnGRUtanhRewards11 = [-0.17544633017412387, -0.17526455026455026, -0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026, -0.1768976897689769, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228]
drnnGRUtanhRewards12 = [-0.1759911894273128, -0.1759911894273128, -0.1778021978021978, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.17562802996914942, -0.177078750549934, -0.1759911894273128, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026]
drnnGRUtanhRewards13 = [-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.177078750549934, -0.17617264919621228, -0.1759911894273128, -0.17580964970257765, -0.1763540290620872, -0.17580964970257765, -0.17562802996914942, -0.177078750549934, -0.17617264919621228]
drnnGRUtanhRewards14 = [-0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.1768976897689769, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765]
drnnGRUtanhRewards15 = [-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17526455026455026, -0.1768976897689769, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17544633017412387]
drnnGRUtanhRewards16 = [-0.17725973169122497, -0.17526455026455026, -0.1763540290620872, -0.1759911894273128, -0.1759911894273128, -0.17544633017412387, -0.17544633017412387, -0.17508269018743108, -0.17653532907770195, -0.17544633017412387, -0.17617264919621228, -0.17653532907770195]
drnnGRUtanhRewards17 = [-0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108, -0.1768976897689769, -0.177078750549934, -0.17580964970257765, -0.17798286090969018, -0.17617264919621228, -0.17580964970257765]
drnnGRUtanhRewards18 = [-0.17580964970257765, -0.17617264919621228, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.1768976897689769, -0.17580964970257765]
drnnGRUtanhRewards19 = [-0.17544633017412387, -0.17617264919621228, -0.17725973169122497, -0.1759911894273128, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17562802996914942, -0.1759911894273128, -0.177078750549934]
drnnGRUtanhRewards20 = [-0.17617264919621228, -0.177078750549934, -0.1759911894273128, -0.17544633017412387, -0.177078750549934, -0.177078750549934, -0.177078750549934, -0.17508269018743108, -0.1778021978021978, -0.17544633017412387, -0.17617264919621228, -0.17562802996914942]
drnnGRUtanhRewards21 = [-0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.1763540290620872, -0.17544633017412387, -0.1763540290620872, -0.1763540290620872, -0.1763540290620872, -0.17526455026455026, -0.1759911894273128]
drnnGRUtanhRewards22 = [-0.17580964970257765, -0.1763540290620872, -0.17562802996914942, -0.17617264919621228, -0.1759911894273128, -0.17526455026455026, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.1763540290620872, -0.17617264919621228, -0.1759911894273128]
drnnGRUtanhRewards23 = [-0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.177078750549934, -0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.17544633017412387, -0.17544633017412387, -0.1768976897689769, -0.17617264919621228, -0.17508269018743108]
drnnGRUtanhRewards24 = [-0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.177078750549934, -0.1768976897689769, -0.17580964970257765, -0.1763540290620872, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765]
drnnGRUtanhRewards25 = [-0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.17671654929577466, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.17544633017412387, -0.17580964970257765]
drnnGRUtanhRewards26 = [-0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17526455026455026, -0.1763540290620872, -0.17544633017412387]
drnnGRUtanhRewards27 = [-0.17508269018743108, -0.17544633017412387, -0.17544633017412387, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17653532907770195, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765]
drnnGRUtanhRewards28 = [-0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17653532907770195, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765]
drnnGRUtanhRewards29 = [-0.17580964970257765, -0.17544633017412387, -0.17544633017412387, -0.17562802996914942, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17526455026455026]
drnnGRUtanhRewards30 = [-0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17526455026455026, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.1759911894273128]
drnnGRUtanhRewards31 = [-0.17526455026455026, -0.1759911894273128, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17526455026455026]
drnnGRUtanhRewards32 = [-0.17562802996914942, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026]
drnnGRUtanhRewards33 = [-0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17562802996914942, -0.17617264919621228, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108]
drnnGRUtanhRewards34 = [-0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17508269018743108, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17562802996914942]
drnnGRUtanhRewards35 = [-0.17526455026455026, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17508269018743108, -0.1763540290620872, -0.17526455026455026, -0.17617264919621228, -0.17526455026455026, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228]
drnnGRUtanhRewards36 = [-0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnGRUtanhRewards37 = [-0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.177078750549934, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387]
drnnGRUtanhRewards38 = [-0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.17562802996914942, -0.17544633017412387]
drnnGRUtanhRewards39 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765]
drnnGRUtanhRewards40 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026]
drnnGRUtanhRewards41 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnGRUtanhRewards42 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17544633017412387, -0.1749007498897221]
drnnGRUtanhRewards43 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnGRUtanhRewards44 = [-0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnGRUtanhRewards45 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026]
drnnGRUtanhRewards46 = [-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942]
drnnGRUtanhRewards47 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnGRUtanhRewards48 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnGRUtanhRewards49 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942]
# Deep Recurrent Reinforcement Learning: 1 GRU layer and 4 Dense layers, relu activation function, 12 episodes, 50 iterations
drnnGRUreluMakespan0 = [800, 799, 798, 797, 798, 800, 800, 796, 800, 794, 800, 800]
drnnGRUreluMakespan1 = [798, 800, 805, 795, 799, 808, 795, 800, 796, 798, 799, 798]
drnnGRUreluMakespan2 = [799, 800, 806, 800, 800, 805, 805, 798, 799, 807, 800, 800]
drnnGRUreluMakespan3 = [798, 795, 799, 800, 800, 796, 798, 800, 800, 804, 805, 800]
drnnGRUreluMakespan4 = [811, 800, 799, 800, 805, 798, 798, 799, 796, 804, 805, 804]
drnnGRUreluMakespan5 = [799, 795, 797, 800, 798, 800, 800, 798, 800, 797, 800, 798]
drnnGRUreluMakespan6 = [798, 800, 798, 799, 797, 798, 800, 796, 801, 799, 795, 798]
drnnGRUreluMakespan7 = [800, 804, 795, 801, 796, 806, 805, 798, 800, 799, 799, 804]
drnnGRUreluMakespan8 = [800, 799, 799, 800, 805, 796, 800, 800, 810, 796, 800, 798]
drnnGRUreluMakespan9 = [794, 800, 799, 805, 800, 800, 798, 798, 796, 795, 798, 796]
drnnGRUreluMakespan10 = [798, 800, 798, 801, 795, 802, 796, 809, 800, 800, 798, 795]
drnnGRUreluMakespan11 = [804, 800, 799, 799, 798, 803, 798, 798, 805, 803, 800, 796]
drnnGRUreluMakespan12 = [800, 799, 805, 797, 798, 796, 799, 794, 799, 805, 799, 800]
drnnGRUreluMakespan13 = [796, 800, 798, 800, 795, 799, 800, 804, 800, 794, 805, 805]
drnnGRUreluMakespan14 = [800, 795, 796, 798, 798, 801, 805, 794, 800, 801, 801, 796]
drnnGRUreluMakespan15 = [798, 800, 796, 796, 798, 794, 797, 800, 796, 801, 795, 799]
drnnGRUreluMakespan16 = [800, 805, 794, 800, 799, 800, 805, 801, 798, 800, 801, 799]
drnnGRUreluMakespan17 = [797, 803, 801, 808, 794, 799, 799, 800, 805, 796, 801, 796]
drnnGRUreluMakespan18 = [805, 800, 800, 804, 799, 798, 800, 799, 804, 796, 800, 804]
drnnGRUreluMakespan19 = [804, 798, 800, 799, 799, 799, 805, 795, 801, 799, 799, 805]
drnnGRUreluMakespan20 = [799, 804, 796, 798, 796, 798, 800, 805, 799, 810, 800, 800]
drnnGRUreluMakespan21 = [798, 799, 799, 805, 798, 798, 805, 798, 794, 799, 798, 798]
drnnGRUreluMakespan22 = [799, 798, 798, 796, 798, 805, 799, 798, 798, 799, 796, 798]
drnnGRUreluMakespan23 = [798, 805, 808, 798, 798, 805, 810, 796, 804, 799, 800, 799]
drnnGRUreluMakespan24 = [798, 796, 798, 795, 800, 798, 799, 798, 797, 805, 798, 800]
drnnGRUreluMakespan25 = [799, 796, 799, 798, 805, 798, 798, 800, 796, 794, 810, 798]
drnnGRUreluMakespan26 = [799, 798, 805, 800, 802, 798, 799, 799, 799, 794, 802, 797]
drnnGRUreluMakespan27 = [798, 800, 805, 796, 798, 795, 802, 796, 798, 800, 798, 794]
drnnGRUreluMakespan28 = [796, 805, 798, 800, 800, 798, 810, 798, 798, 798, 796, 796]
drnnGRUreluMakespan29 = [800, 798, 798, 802, 794, 798, 796, 808, 800, 800, 798, 799]
drnnGRUreluMakespan30 = [798, 796, 798, 798, 794, 798, 794, 800, 796, 794, 800, 800]
drnnGRUreluMakespan31 = [794, 802, 797, 799, 798, 800, 799, 799, 796, 796, 798, 798]
drnnGRUreluMakespan32 = [799, 798, 794, 795, 798, 805, 804, 797, 795, 800, 796, 798]
drnnGRUreluMakespan33 = [803, 799, 805, 796, 794, 798, 797, 798, 798, 794, 794, 798]
drnnGRUreluMakespan34 = [810, 796, 795, 798, 799, 798, 796, 795, 795, 797, 798, 798]
drnnGRUreluMakespan35 = [799, 799, 799, 799, 795, 798, 795, 800, 796, 795, 795, 796]
drnnGRUreluMakespan36 = [795, 797, 798, 799, 799, 799, 800, 794, 796, 795, 798, 800]
drnnGRUreluMakespan37 = [800, 798, 799, 794, 800, 796, 798, 798, 797, 800, 794, 798]
drnnGRUreluMakespan38 = [800, 799, 794, 796, 795, 800, 796, 804, 800, 795, 800, 798]
drnnGRUreluMakespan39 = [794, 798, 795, 804, 805, 799, 798, 800, 796, 798, 795, 794]
drnnGRUreluMakespan40 = [799, 798, 796, 798, 798, 799, 800, 796, 798, 798, 799, 798]
drnnGRUreluMakespan41 = [796, 798, 800, 797, 799, 796, 797, 796, 799, 804, 805, 798]
drnnGRUreluMakespan42 = [798, 794, 795, 799, 799, 798, 797, 798, 798, 798, 798, 795]
drnnGRUreluMakespan43 = [799, 798, 794, 794, 795, 794, 795, 799, 799, 800, 799, 794]
drnnGRUreluMakespan44 = [795, 796, 795, 799, 794, 795, 794, 796, 795, 794, 795, 796]
drnnGRUreluMakespan45 = [794, 797, 794, 795, 796, 795, 794, 799, 795, 794, 798, 798]
drnnGRUreluMakespan46 = [795, 795, 794, 795, 794, 794, 792, 794, 795, 797, 794, 794]
drnnGRUreluMakespan47 = [798, 796, 797, 798, 794, 798, 794, 797, 794, 803, 798, 798]
drnnGRUreluMakespan48 = [795, 794, 796, 798, 795, 794, 796, 795, 796, 794, 796, 796]
drnnGRUreluMakespan49 = [798, 798, 796, 798, 798, 796, 796, 798, 798, 798, 796, 798]
drnnGRUreluRewards0 = [-0.17617264919621228, -0.1759911894273128, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228, -0.17617264919621228]
drnnGRUreluRewards1 = [-0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17526455026455026, -0.1759911894273128, -0.1776214552648934, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765]
drnnGRUreluRewards2 = [-0.1759911894273128, -0.17617264919621228, -0.17725973169122497, -0.17617264919621228, -0.17617264919621228, -0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.1759911894273128, -0.1774406332453826, -0.17617264919621228, -0.17617264919621228]
drnnGRUreluRewards3 = [-0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.177078750549934, -0.17617264919621228]
drnnGRUreluRewards4 = [-0.1781634446397188, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.1768976897689769, -0.177078750549934, -0.1768976897689769]
drnnGRUreluRewards5 = [-0.1759911894273128, -0.17526455026455026, -0.17562802996914942, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17562802996914942, -0.17617264919621228, -0.17580964970257765]
drnnGRUreluRewards6 = [-0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17562802996914942, -0.17580964970257765, -0.17544633017412387, -0.17617264919621228, -0.1763540290620872, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765]
drnnGRUreluRewards7 = [-0.17617264919621228, -0.1768976897689769, -0.17526455026455026, -0.1763540290620872, -0.17544633017412387, -0.17725973169122497, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.1768976897689769]
drnnGRUreluRewards8 = [-0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.177078750549934, -0.17544633017412387, -0.17617264919621228, -0.17617264919621228, -0.17798286090969018, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765]
drnnGRUreluRewards9 = [-0.17508269018743108, -0.17617264919621228, -0.1759911894273128, -0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387]
drnnGRUreluRewards10 = [-0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.1763540290620872, -0.17526455026455026, -0.17653532907770195, -0.17544633017412387, -0.1778021978021978, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026]
drnnGRUreluRewards11 = [-0.1768976897689769, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.17580964970257765, -0.17671654929577466, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17671654929577466, -0.17617264919621228, -0.17544633017412387]
drnnGRUreluRewards12 = [-0.17617264919621228, -0.1759911894273128, -0.177078750549934, -0.17562802996914942, -0.17580964970257765, -0.17544633017412387, -0.1759911894273128, -0.17508269018743108, -0.1759911894273128, -0.177078750549934, -0.1759911894273128, -0.17617264919621228]
drnnGRUreluRewards13 = [-0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228, -0.1768976897689769, -0.17617264919621228, -0.17508269018743108, -0.177078750549934, -0.177078750549934]
drnnGRUreluRewards14 = [-0.17617264919621228, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872, -0.177078750549934, -0.17508269018743108, -0.17617264919621228, -0.1763540290620872, -0.1763540290620872, -0.17544633017412387]
drnnGRUreluRewards15 = [-0.17580964970257765, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17508269018743108, -0.17562802996914942, -0.17617264919621228, -0.17544633017412387, -0.1763540290620872, -0.17526455026455026, -0.1759911894273128]
drnnGRUreluRewards16 = [-0.17617264919621228, -0.177078750549934, -0.17508269018743108, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.177078750549934, -0.1763540290620872, -0.17580964970257765, -0.17617264919621228, -0.1763540290620872, -0.1759911894273128]
drnnGRUreluRewards17 = [-0.17562802996914942, -0.17671654929577466, -0.1763540290620872, -0.1776214552648934, -0.17508269018743108, -0.1759911894273128, -0.17617264919621228, -0.1759911894273128, -0.177078750549934, -0.17544633017412387, -0.1763540290620872, -0.17544633017412387]
drnnGRUreluRewards18 = [-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.1768976897689769, -0.17544633017412387, -0.17617264919621228, -0.1768976897689769]
drnnGRUreluRewards19 = [-0.1768976897689769, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.177078750549934, -0.17526455026455026, -0.1763540290620872, -0.1759911894273128, -0.1759911894273128, -0.177078750549934]
drnnGRUreluRewards20 = [-0.1759911894273128, -0.1768976897689769, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.1759911894273128, -0.17798286090969018, -0.17617264919621228, -0.17617264919621228]
drnnGRUreluRewards21 = [-0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17508269018743108, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards22 = [-0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.177078750549934, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.17580964970257765]
drnnGRUreluRewards23 = [-0.17580964970257765, -0.177078750549934, -0.1776214552648934, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17798286090969018, -0.17544633017412387, -0.1768976897689769, -0.1759911894273128, -0.17617264919621228, -0.1759911894273128]
drnnGRUreluRewards24 = [-0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17562802996914942, -0.177078750549934, -0.17580964970257765, -0.17617264919621228]
drnnGRUreluRewards25 = [-0.1759911894273128, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17544633017412387, -0.17508269018743108, -0.17798286090969018, -0.17580964970257765]
drnnGRUreluRewards26 = [-0.1759911894273128, -0.17580964970257765, -0.177078750549934, -0.17617264919621228, -0.17653532907770195, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.17508269018743108, -0.17653532907770195, -0.17562802996914942]
drnnGRUreluRewards27 = [-0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17653532907770195, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17508269018743108]
drnnGRUreluRewards28 = [-0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17798286090969018, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17544633017412387]
drnnGRUreluRewards29 = [-0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17653532907770195, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387, -0.1776214552648934, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128]
drnnGRUreluRewards30 = [-0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387, -0.17508269018743108, -0.17617264919621228, -0.17617264919621228]
drnnGRUreluRewards31 = [-0.17508269018743108, -0.17653532907770195, -0.17562802996914942, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards32 = [-0.1759911894273128, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.1768976897689769, -0.177078750549934, -0.17562802996914942, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765]
drnnGRUreluRewards33 = [-0.17671654929577466, -0.1759911894273128, -0.177078750549934, -0.17544633017412387, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765]
drnnGRUreluRewards34 = [-0.17798286090969018, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards35 = [-0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387]
drnnGRUreluRewards36 = [-0.17526455026455026, -0.17562802996914942, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228]
drnnGRUreluRewards37 = [-0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17580964970257765]
drnnGRUreluRewards38 = [-0.17617264919621228, -0.1759911894273128, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.1768976897689769, -0.17617264919621228, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765]
drnnGRUreluRewards39 = [-0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.1768976897689769, -0.177078750549934, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108]
drnnGRUreluRewards40 = [-0.1759911894273128, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765]
drnnGRUreluRewards41 = [-0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17562802996914942, -0.1759911894273128, -0.17544633017412387, -0.17562802996914942, -0.17544633017412387, -0.1759911894273128, -0.1768976897689769, -0.177078750549934, -0.17580964970257765]
drnnGRUreluRewards42 = [-0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128, -0.1759911894273128, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026]
drnnGRUreluRewards43 = [-0.1759911894273128, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.1759911894273128, -0.17508269018743108]
drnnGRUreluRewards44 = [-0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.1759911894273128, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387]
drnnGRUreluRewards45 = [-0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards46 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108]
drnnGRUreluRewards47 = [-0.17580964970257765, -0.17544633017412387, -0.17562802996914942, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17671654929577466, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards48 = [-0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17544633017412387, -0.17544633017412387]
drnnGRUreluRewards49 = [-0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765]
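# With the four recurrent configurations above (LSTM/GRU x tanh/relu) fully listed, a
# quick way to compare them is the mean makespan of each 12-episode list. A minimal
# helper for that; the arguments are any of the makespan lists defined in this file:
def mean_makespan(*makespan_lists):
    """Return the mean of each 12-episode makespan list, in the order given."""
    return [sum(ms) / len(ms) for ms in makespan_lists]

# Example: mean_makespan(drnnGRUreluMakespan0, drnnGRUreluMakespan49) gives the
# average makespan at the first and last recorded iteration of the GRU/relu runs.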
# Deep Reinforcement Learning: 5 Dense layers, tanh activation function, 12 episodes, 50 iterations
drlTanhMakespan0 = [794, 794, 805, 799, 810, 800, 794, 810, 804, 806, 812, 808]
drlTanhMakespan1 = [796, 795, 795, 798, 799, 800, 800, 795, 797, 796, 797, 799]
drlTanhMakespan2 = [800, 797, 798, 801, 799, 800, 796, 795, 797, 796, 794, 798]
drlTanhMakespan3 = [800, 795, 799, 796, 799, 798, 795, 799, 795, 799, 798, 796]
drlTanhMakespan4 = [809, 795, 795, 800, 797, 795, 798, 798, 799, 799, 798, 798]
drlTanhMakespan5 = [795, 795, 795, 799, 795, 798, 795, 800, 795, 796, 795, 805]
drlTanhMakespan6 = [794, 800, 795, 793, 798, 795, 794, 798, 795, 799, 795, 796]
drlTanhMakespan7 = [795, 795, 795, 795, 798, 795, 797, 797, 795, 795, 798, 797]
drlTanhMakespan8 = [795, 795, 795, 794, 800, 800, 794, 795, 794, 794, 797, 795]
drlTanhMakespan9 = [793, 794, 796, 795, 796, 800, 794, 797, 793, 795, 798, 795]
drlTanhMakespan10 = [795, 795, 797, 794, 795, 798, 797, 795, 798, 794, 794, 794]
drlTanhMakespan11 = [795, 795, 795, 795, 797, 795, 795, 794, 795, 795, 795, 794]
drlTanhMakespan12 = [794, 798, 795, 794, 795, 795, 795, 797, 799, 795, 795, 795]
drlTanhMakespan13 = [795, 797, 795, 800, 796, 795, 796, 795, 795, 795, 798, 794]
drlTanhMakespan14 = [795, 795, 796, 794, 794, 794, 797, 795, 798, 795, 795, 793]
drlTanhMakespan15 = [799, 794, 795, 795, 795, 796, 801, 797, 795, 794, 795, 799]
drlTanhMakespan16 = [795, 795, 796, 798, 795, 795, 795, 795, 795, 798, 798, 796]
drlTanhMakespan17 = [800, 798, 795, 795, 798, 794, 795, 795, 797, 795, 796, 794]
drlTanhMakespan18 = [797, 800, 798, 797, 796, 794, 799, 797, 795, 796, 799, 798]
drlTanhMakespan19 = [797, 800, 795, 794, 794, 796, 795, 798, 796, 798, 797, 795]
drlTanhMakespan20 = [794, 795, 795, 799, 798, 797, 795, 795, 798, 795, 798, 795]
drlTanhMakespan21 = [796, 795, 795, 795, 795, 797, 798, 794, 797, 795, 796, 794]
drlTanhMakespan22 = [799, 796, 795, 795, 795, 795, 796, 795, 796, 798, 796, 795]
drlTanhMakespan23 = [799, 799, 795, 796, 796, 799, 796, 797, 794, 794, 798, 796]
drlTanhMakespan24 = [795, 795, 797, 800, 797, 795, 795, 796, 795, 795, 798, 799]
drlTanhMakespan25 = [795, 797, 795, 795, 795, 795, 800, 796, 795, 797, 795, 795]
drlTanhMakespan26 = [795, 795, 799, 794, 797, 794, 794, 798, 794, 796, 795, 798]
drlTanhMakespan27 = [796, 796, 795, 796, 798, 797, 794, 795, 794, 794, 794, 798]
drlTanhMakespan28 = [795, 795, 794, 798, 796, 796, 800, 797, 797, 796, 795, 794]
drlTanhMakespan29 = [795, 795, 798, 800, 797, 794, 796, 794, 792, 794, 794, 795]
drlTanhMakespan30 = [798, 797, 795, 799, 797, 800, 798, 799, 797, 800, 794, 796]
drlTanhMakespan31 = [794, 795, 800, 798, 800, 794, 800, 798, 799, 798, 798, 798]
drlTanhMakespan32 = [795, 795, 795, 794, 794, 794, 793, 795, 794, 793, 794, 795]
drlTanhMakespan33 = [794, 797, 792, 794, 795, 795, 797, 795, 795, 794, 792, 795]
drlTanhMakespan34 = [795, 794, 795, 798, 795, 796, 794, 795, 794, 794, 795, 794]
drlTanhMakespan35 = [796, 794, 797, 793, 794, 798, 795, 794, 793, 793, 795, 794]
drlTanhMakespan36 = [795, 795, 794, 795, 795, 795, 794, 795, 795, 793, 795, 794]
drlTanhMakespan37 = [794, 794, 798, 794, 794, 796, 795, 794, 793, 795, 795, 792]
drlTanhMakespan38 = [794, 796, 795, 794, 798, 798, 795, 795, 794, 794, 795, 794]
drlTanhMakespan39 = [794, 795, 795, 796, 792, 794, 795, 794, 795, 794, 794, 795]
drlTanhMakespan40 = [798, 795, 794, 795, 794, 794, 793, 795, 794, 794, 797, 794]
drlTanhMakespan41 = [795, 792, 795, 794, 794, 795, 794, 795, 792, 797, 795, 795]
drlTanhMakespan42 = [792, 794, 794, 795, 794, 794, 795, 794, 792, 794, 794, 794]
drlTanhMakespan43 = [794, 796, 794, 793, 795, 795, 793, 798, 794, 794, 798, 794]
drlTanhMakespan44 = [794, 794, 794, 794, 795, 794, 793, 794, 794, 795, 795, 794]
drlTanhMakespan45 = [790, 794, 793, 794, 793, 794, 795, 794, 791, 795, 795, 794]
drlTanhMakespan46 = [792, 794, 794, 794, 794, 794, 794, 793, 794, 794, 794, 794]
drlTanhMakespan47 = [794, 794, 794, 794, 794, 794, 794, 794, 792, 795, 793, 795]
drlTanhMakespan48 = [794, 794, 792, 792, 797, 794, 792, 794, 794, 795, 794, 795]
drlTanhMakespan49 = [795, 794, 794, 796, 794, 797, 794, 794, 794, 794, 794, 794]
drlTanhMakespan50 = [794, 792, 795, 794, 794, 794, 794, 794, 795, 794, 795, 794]
drlTanhMakespan51 = [794, 792, 796, 795, 794, 794, 795, 794, 795, 795, 795, 794]
drlTanhMakespan52 = [794, 794, 795, 792, 795, 795, 795, 792, 794, 793, 795, 794]
drlTanhMakespan53 = [794, 792, 794, 792, 794, 794, 794, 795, 795, 794, 794, 792]
drlTanhMakespan54 = [795, 793, 794, 794, 794, 792, 795, 794, 794, 792, 794, 796]
drlTanhMakespan55 = [795, 794, 794, 795, 795, 793, 794, 795, 794, 797, 795, 792]
drlTanhMakespan56 = [795, 795, 792, 795, 794, 795, 794, 794, 794, 795, 795, 795]
drlTanhMakespan57 = [795, 792, 795, 794, 795, 795, 792, 795, 794, 797, 792, 792]
drlTanhMakespan58 = [795, 795, 794, 795, 792, 794, 794, 794, 792, 792, 792, 793]
drlTanhMakespan59 = [795, 794, 792, 794, 794, 794, 792, 794, 794, 794, 793, 795]
drlTanhMakespan60 = [794, 795, 795, 795, 798, 794, 794, 794, 794, 794, 794, 792]
drlTanhMakespan61 = [792, 795, 794, 794, 795, 794, 792, 795, 795, 794, 794, 795]
drlTanhMakespan62 = [795, 794, 794, 794, 799, 794, 792, 794, 795, 795, 794, 793]
drlTanhMakespan63 = [791, 795, 792, 796, 794, 794, 792, 795, 793, 794, 792, 794]
drlTanhRewards0 = [-0.17508269018743108, -0.17508269018743108, -0.177078750549934, -0.1759911894273128, -0.17798286090969018, -0.17617264919621228, -0.17508269018743108, -0.17798286090969018, -0.1768976897689769, -0.17725973169122497, -0.17834394904458598, -0.1776214552648934]
drlTanhRewards1 = [-0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.17526455026455026, -0.17562802996914942, -0.17544633017412387, -0.17562802996914942, -0.1759911894273128]
drlTanhRewards2 = [-0.17617264919621228, -0.17562802996914942, -0.17580964970257765, -0.1763540290620872, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17544633017412387, -0.17580964970257765]
drlTanhRewards3 = [-0.17617264919621228, -0.1759911894273128, -0.17526455026455026, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.1759911894273128, -0.17580964970257765, -0.17544633017412387]
drlTanhRewards4 = [-0.1778021978021978, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17562802996914942, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765]
drlTanhRewards5 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.177078750549934]
drlTanhRewards6 = [-0.17508269018743108, -0.17617264919621228, -0.17526455026455026, -0.1749007498897221, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17544633017412387]
drlTanhRewards7 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942]
drlTanhRewards8 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026]
drlTanhRewards9 = [-0.1749007498897221, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17617264919621228, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.1749007498897221, -0.17580964970257765, -0.17526455026455026]
drlTanhRewards10 = [-0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards11 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards12 = [-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drlTanhRewards13 = [-0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108]
drlTanhRewards14 = [-0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.1749007498897221]
drlTanhRewards15 = [-0.1759911894273128, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17562802996914942, -0.1763540290620872, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128]
drlTanhRewards16 = [-0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387]
drlTanhRewards17 = [-0.17617264919621228, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108]
drlTanhRewards18 = [-0.17562802996914942, -0.17617264919621228, -0.17580964970257765, -0.17562802996914942, -0.17544633017412387, -0.1759911894273128, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765]
drlTanhRewards19 = [-0.17562802996914942, -0.17617264919621228, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026]
drlTanhRewards20 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.1759911894273128, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026]
drlTanhRewards21 = [-0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108]
drlTanhRewards22 = [-0.1759911894273128, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026]
drlTanhRewards23 = [-0.1759911894273128, -0.1759911894273128, -0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.1759911894273128, -0.17544633017412387, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387]
drlTanhRewards24 = [-0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17617264919621228, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.1759911894273128]
drlTanhRewards25 = [-0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drlTanhRewards26 = [-0.17526455026455026, -0.17526455026455026, -0.1759911894273128, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765]
drlTanhRewards27 = [-0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108]
drlTanhRewards28 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387, -0.17562802996914942, -0.17544633017412387, -0.17617264919621228, -0.17562802996914942, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards29 = [-0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.17562802996914942, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards30 = [-0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.1759911894273128, -0.17562802996914942, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387]
drlTanhRewards31 = [-0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765]
drlTanhRewards32 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards33 = [-0.17508269018743108, -0.17562802996914942, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026]
drlTanhRewards34 = [-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards35 = [-0.17544633017412387, -0.17508269018743108, -0.17562802996914942, -0.1749007498897221, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards36 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards37 = [-0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224]
drlTanhRewards38 = [-0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards39 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards40 = [-0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108]
drlTanhRewards41 = [-0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026]
drlTanhRewards42 = [-0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards43 = [-0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108]
drlTanhRewards44 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards45 = [-0.1749007498897221, -0.17435444714191128, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17453662842012357, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards46 = [-0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards47 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026]
drlTanhRewards48 = [-0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17471872931833224, -0.17508269018743108, -0.17562802996914942, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards49 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards50 = [-0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards51 = [-0.17508269018743108, -0.17471872931833224, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards52 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards53 = [-0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224]
drlTanhRewards54 = [-0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17544633017412387]
drlTanhRewards55 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17471872931833224]
drlTanhRewards56 = [-0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drlTanhRewards57 = [-0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17471872931833224, -0.17471872931833224]
drlTanhRewards58 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17471872931833224, -0.17471872931833224, -0.1749007498897221]
drlTanhRewards59 = [-0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026]
drlTanhRewards60 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224]
drlTanhRewards61 = [-0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards62 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1759911894273128, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221]
drlTanhRewards63 = [-0.17453662842012357, -0.17471872931833224, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108]
# Deep Reinforcement Learning: 5 Dense layers, relu activation function, 12 episodes, 50 iterations
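# (Assumption for illustration: a relu counterpart of this configuration would mirror the
# build_dense_tanh_policy sketch above, using activation="relu" in each hidden Dense layer.)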
drlReluMakespan0 = [796, 798, 809, 798, 796, 800, 798, 799, 800, 794, 800, 798]
drlReluMakespan1 = [800, 800, 801, 806, 804, 806, 808, 798, 796, 796, 798, 800]
drlReluMakespan2 = [805, 805, 798, 800, 800, 798, 801, 799, 800, 806, 800, 800]
drlReluMakespan3 = [798, 799, 798, 795, 798, 808, 803, 800, 798, 795, 799, 800]
drlReluMakespan4 = [805, 805, 799, 796, 798, 803, 799, 800, 800, 800, 795, 794]
drlReluMakespan5 = [799, 796, 795, 800, 801, 796, 800, 795, 803, 800, 800, 805]
drlReluMakespan6 = [799, 795, 798, 794, 805, 796, 795, 799, 798, 795, 804, 796]
drlReluMakespan7 = [795, 798, 799, 798, 798, 799, 795, 794, 796, 794, 795, 805]
drlReluMakespan8 = [805, 794, 794, 795, 798, 795, 798, 795, 799, 800, 796, 798]
drlReluMakespan9 = [797, 797, 797, 794, 795, 794, 794, 797, 796, 795, 801, 799]
drlReluMakespan10 = [799, 794, 797, 795, 794, 794, 795, 795, 795, 796, 797, 799]
drlReluMakespan11 = [796, 798, 800, 795, 805, 794, 798, 796, 795, 794, 798, 795]
drlReluMakespan12 = [800, 795, 794, 798, 800, 805, 800, 798, 804, 799, 794, 803]
drlReluMakespan13 = [796, 799, 798, 794, 800, 794, 795, 796, 798, 795, 794, 799]
drlReluMakespan14 = [795, 798, 798, 798, 805, 798, 798, 798, 795, 794, 800, 796]
drlReluMakespan15 = [795, 798, 795, 805, 798, 794, 795, 798, 796, 794, 795, 796]
drlReluMakespan16 = [798, 795, 796, 799, 796, 798, 798, 795, 795, 795, 795, 799]
drlReluMakespan17 = [794, 798, 796, 798, 795, 801, 794, 798, 797, 795, 796, 801]
drlReluMakespan18 = [798, 795, 798, 798, 801, 798, 795, 795, 797, 800, 794, 800]
drlReluMakespan19 = [795, 798, 794, 800, 796, 795, 798, 797, 795, 794, 796, 796]
drlReluMakespan20 = [794, 794, 795, 795, 795, 795, 796, 798, 799, 799, 799, 795]
drlReluMakespan21 = [802, 796, 794, 797, 797, 800, 794, 794, 804, 803, 798, 797]
drlReluMakespan22 = [794, 795, 795, 795, 798, 795, 794, 799, 794, 803, 795, 794]
drlReluMakespan23 = [794, 798, 799, 794, 795, 795, 799, 795, 796, 795, 797, 799]
drlReluMakespan24 = [795, 794, 797, 800, 794, 795, 795, 795, 795, 800, 800, 798]
drlReluMakespan25 = [795, 794, 797, 796, 798, 795, 795, 794, 799, 795, 794, 798]
drlReluMakespan26 = [801, 795, 800, 794, 794, 796, 800, 798, 798, 799, 794, 796]
drlReluMakespan27 = [796, 795, 796, 795, 796, 795, 795, 800, 794, 794, 794, 796]
drlReluMakespan28 = [794, 794, 795, 796, 794, 795, 795, 797, 794, 794, 796, 795]
drlReluMakespan29 = [793, 794, 795, 800, 795, 795, 794, 798, 798, 796, 795, 794]
drlReluMakespan30 = [802, 794, 794, 798, 794, 796, 805, 794, 800, 794, 796, 794]
drlReluMakespan31 = [797, 794, 794, 794, 800, 800, 794, 794, 798, 795, 794, 798]
drlReluMakespan32 = [794, 798, 794, 795, 794, 795, 798, 794, 794, 795, 794, 798]
drlReluMakespan33 = [798, 794, 798, 795, 794, 793, 797, 798, 794, 794, 801, 793]
drlReluMakespan34 = [794, 798, 794, 795, 794, 793, 798, 795, 794, 800, 794, 795]
drlReluMakespan35 = [794, 796, 794, 796, 806, 795, 795, 795, 796, 795, 795, 799]
drlReluMakespan36 = [795, 794, 794, 796, 796, 798, 794, 796, 794, 795, 794, 795]
drlReluMakespan37 = [795, 794, 795, 798, 794, 794, 794, 794, 794, 794, 795, 797]
drlReluMakespan38 = [794, 798, 794, 798, 797, 794, 794, 795, 795, 794, 795, 795]
drlReluMakespan39 = [797, 794, 795, 796, 796, 796, 798, 794, 794, 795, 794, 798]
drlReluMakespan40 = [798, 795, 795, 798, 792, 795, 795, 794, 795, 794, 798, 794]
drlReluMakespan41 = [795, 794, 794, 794, 794, 794, 798, 793, 794, 794, 794, 793]
drlReluMakespan42 = [794, 794, 794, 794, 799, 794, 795, 794, 796, 794, 794, 794]
drlReluMakespan43 = [794, 797, 795, 794, 795, 794, 794, 795, 794, 794, 793, 794]
drlReluMakespan44 = [794, 792, 793, 794, 794, 796, 794, 798, 795, 794, 794, 796]
drlReluMakespan45 = [795, 794, 799, 794, 794, 793, 794, 795, 795, 793, 796, 794]
drlReluMakespan46 = [794, 796, 794, 794, 794, 794, 794, 793, 799, 792, 794, 794]
drlReluMakespan47 = [795, 794, 793, 794, 796, 797, 794, 794, 795, 794, 794, 794]
drlReluMakespan48 = [794, 794, 794, 792, 794, 794, 795, 794, 794, 794, 794, 794]
drlReluMakespan49 = [794, 794, 795, 792, 797, 797, 794, 794, 792, 800, 795, 795]
drlReluRewards0 = [-0.17544633017412387, -0.17580964970257765, -0.1778021978021978, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17508269018743108, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765]
drlReluRewards1 = [-0.17617264919621228, -0.17617264919621228, -0.1763540290620872, -0.17725973169122497, -0.1768976897689769, -0.17725973169122497, -0.1776214552648934, -0.17580964970257765, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228]
drlReluRewards2 = [-0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.1763540290620872, -0.1759911894273128, -0.17617264919621228, -0.17725973169122497, -0.17617264919621228, -0.17617264919621228]
drlReluRewards3 = [-0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.1776214552648934, -0.17671654929577466, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228]
drlReluRewards4 = [-0.177078750549934, -0.177078750549934, -0.1759911894273128, -0.17544633017412387, -0.17580964970257765, -0.17671654929577466, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17526455026455026, -0.17508269018743108]
drlReluRewards5 = [-0.1759911894273128, -0.17544633017412387, -0.17526455026455026, -0.17617264919621228, -0.1763540290620872, -0.17544633017412387, -0.17526455026455026, -0.17617264919621228, -0.17671654929577466, -0.17617264919621228, -0.17617264919621228, -0.177078750549934]
drlReluRewards6 = [-0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.177078750549934, -0.17544633017412387, -0.17526455026455026, -0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.1768976897689769, -0.17544633017412387]
drlReluRewards7 = [-0.17526455026455026, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.177078750549934]
drlReluRewards8 = [-0.177078750549934, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765]
drlReluRewards9 = [-0.17562802996914942, -0.17562802996914942, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17544633017412387, -0.17526455026455026, -0.1763540290620872, -0.1759911894273128]
drlReluRewards10 = [-0.1759911894273128, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17562802996914942, -0.1759911894273128]
drlReluRewards11 = [-0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17526455026455026, -0.177078750549934, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026]
drlReluRewards12 = [-0.17617264919621228, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17617264919621228, -0.17580964970257765, -0.1768976897689769, -0.1759911894273128, -0.17508269018743108, -0.17671654929577466]
drlReluRewards13 = [-0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128]
drlReluRewards14 = [-0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387]
drlReluRewards15 = [-0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.177078750549934, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387]
drlReluRewards16 = [-0.17580964970257765, -0.17526455026455026, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.1759911894273128]
drlReluRewards17 = [-0.17508269018743108, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.1763540290620872, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.17544633017412387, -0.1763540290620872]
drlReluRewards18 = [-0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228]
drlReluRewards19 = [-0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17544633017412387]
drlReluRewards20 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.17526455026455026]
drlReluRewards21 = [-0.17653532907770195, -0.17544633017412387, -0.17562802996914942, -0.17508269018743108, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.1768976897689769, -0.17671654929577466, -0.17562802996914942]
drlReluRewards22 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17508269018743108, -0.17671654929577466, -0.17526455026455026, -0.17508269018743108]
drlReluRewards23 = [-0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17562802996914942, -0.1759911894273128]
drlReluRewards24 = [-0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765]
drlReluRewards25 = [-0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765]
drlReluRewards26 = [-0.1763540290620872, -0.17526455026455026, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17508269018743108, -0.17544633017412387]
drlReluRewards27 = [-0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387]
drlReluRewards28 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026]
drlReluRewards29 = [-0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108]
drlReluRewards30 = [-0.17653532907770195, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17544633017412387, -0.177078750549934, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108]
drlReluRewards31 = [-0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765]
drlReluRewards32 = [-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765]
drlReluRewards33 = [-0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17562802996914942, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.1763540290620872, -0.1749007498897221]
drlReluRewards34 = [-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17526455026455026]
drlReluRewards35 = [-0.17508269018743108, -0.17544633017412387, -0.17725973169122497, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.1759911894273128]
drlReluRewards36 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drlReluRewards37 = [-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942]
drlReluRewards38 = [-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drlReluRewards39 = [-0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765]
drlReluRewards40 = [-0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108]
drlReluRewards41 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221]
drlReluRewards42 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1759911894273128, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlReluRewards43 = [-0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108]
drlReluRewards44 = [-0.17508269018743108, -0.17471872931833224, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387]
drlReluRewards45 = [-0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17544633017412387, -0.17508269018743108]
drlReluRewards46 = [-0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.1759911894273128, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108]
drlReluRewards47 = [-0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17544633017412387, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlReluRewards48 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlReluRewards49 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17562802996914942, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17617264919621228, -0.17526455026455026, -0.17526455026455026]
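# Illustrative sketch only (an assumption, not the original training script): a minimal
# recurrent policy with 1 GRU layer followed by 4 Dense layers, corresponding to the
# drnnGRU* results aggregated in the main block below. Sequence length, layer widths,
# optimizer and output dimension are hypothetical placeholders.
def build_gru_policy(seq_len, state_dim, n_actions, activation="tanh"):
    # Imported lazily so that loading this results module does not require TensorFlow.
    from tensorflow.keras.layers import GRU, Dense
    from tensorflow.keras.models import Sequential
    model = Sequential()
    model.add(GRU(64, input_shape=(seq_len, state_dim)))  # 1 recurrent (GRU) layer
    model.add(Dense(64, activation=activation))           # Dense layer 1
    model.add(Dense(64, activation=activation))           # Dense layer 2
    model.add(Dense(64, activation=activation))           # Dense layer 3
    model.add(Dense(n_actions, activation="softmax"))     # Dense layer 4: action probabilities
    model.compile(optimizer="adam", loss="categorical_crossentropy")
    return model
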
if __name__ == "__main__":
##############################################
##############################################
##############################################
# Deep Recurrent Reinforcement Learning with 1 GRU layer and 4 Dense layers
drnnGRUtanhMakespan = []
drnnGRUtanhRewards = []
drnnGRUtanhMakespanList = []
drnnGRUtanhRewardsList = []
drnnGRUtanhMakespanValues = []
drnnGRUtanhRewardsValues = []
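# Aggregate the GRU + tanh runs: for each of the 50 iterations, store the mean makespan and
# mean reward over its 12 episodes, and also keep the raw per-episode lists.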
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan0))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan1))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan2))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan3))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan4))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan5))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan6))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan7))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan8))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan9))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan10))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan11))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan12))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan13))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan14))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan15))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan16))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan17))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan18))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan19))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan20))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan21))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan22))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan23))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan24))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan25))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan26))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan27))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan28))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan29))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan30))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan31))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan32))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan33))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan34))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan35))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan36))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan37))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan38))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan39))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan40))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan41))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan42))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan43))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan44))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan45))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan46))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan47))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan48))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan49))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards0))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards1))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards2))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards3))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards4))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards5))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards6))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards7))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards8))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards9))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards10))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards11))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards12))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards13))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards14))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards15))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards16))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards17))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards18))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards19))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards20))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards21))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards22))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards23))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards24))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards25))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards26))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards27))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards28))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards29))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards30))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards31))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards32))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards33))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards34))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards35))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards36))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards37))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards38))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards39))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards40))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards41))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards42))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards43))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards44))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards45))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards46))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards47))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards48))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards49))
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan0)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan1)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan2)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan3)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan4)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan5)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan6)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan7)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan8)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan9)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan10)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan11)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan12)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan13)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan14)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan15)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan16)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan17)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan18)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan19)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan20)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan21)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan22)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan23)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan24)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan25)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan26)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan27)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan28)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan29)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan30)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan31)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan32)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan33)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan34)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan35)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan36)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan37)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan38)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan39)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan40)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan41)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan42)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan43)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan44)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan45)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan46)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan47)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan48)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan49)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards0)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards1)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards2)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards3)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards4)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards5)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards6)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards7)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards8)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards9)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards10)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards11)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards12)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards13)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards14)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards15)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards16)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards17)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards18)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards19)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards20)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards21)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards22)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards23)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards24)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards25)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards26)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards27)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards28)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards29)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards30)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards31)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards32)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards33)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards34)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards35)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards36)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards37)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards38)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards39)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards40)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards41)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards42)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards43)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards44)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards45)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards46)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards47)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards48)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards49)
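# Same aggregation for the GRU + relu runs: per-iteration mean makespan/reward plus the raw per-episode lists.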
drnnGRUreluMakespan = []
drnnGRUreluRewards = []
drnnGRUreluMakespanList = []
drnnGRUreluRewardsList = []
drnnGRUreluMakespanValues = []
drnnGRUreluRewardsValues = []
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan0))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan1))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan2))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan3))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan4))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan5))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan6))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan7))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan8))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan9))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan10))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan11))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan12))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan13))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan14))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan15))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan16))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan17))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan18))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan19))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan20))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan21))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan22))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan23))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan24))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan25))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan26))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan27))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan28))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan29))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan30))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan31))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan32))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan33))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan34))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan35))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan36))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan37))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan38))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan39))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan40))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan41))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan42))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan43))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan44))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan45))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan46))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan47))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan48))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan49))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards0))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards1))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards2))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards3))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards4))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards5))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards6))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards7))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards8))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards9))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards10))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards11))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards12))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards13))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards14))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards15))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards16))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards17))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards18))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards19))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards20))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards21))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards22))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards23))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards24))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards25))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards26))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards27))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards28))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards29))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards30))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards31))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards32))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards33))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards34))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards35))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards36))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards37))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards38))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards39))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards40))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards41))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards42))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards43))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards44))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards45))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards46))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards47))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards48))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards49))
drnnGRUreluMakespanList.append(drnnGRUreluMakespan0)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan1)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan2)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan3)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan4)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan5)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan6)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan7)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan8)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan9)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan10)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan11)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan12)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan13)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan14)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan15)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan16)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan17)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan18)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan19)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan20)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan21)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan22)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan23)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan24)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan25)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan26)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan27)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan28)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan29)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan30)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan31)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan32)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan33)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan34)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan35)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan36)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan37)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan38)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan39)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan40)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan41)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan42)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan43)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan44)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan45)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan46)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan47)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan48)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan49)
drnnGRUreluRewardsList.append(drnnGRUreluRewards0)
drnnGRUreluRewardsList.append(drnnGRUreluRewards1)
drnnGRUreluRewardsList.append(drnnGRUreluRewards2)
drnnGRUreluRewardsList.append(drnnGRUreluRewards3)
drnnGRUreluRewardsList.append(drnnGRUreluRewards4)
drnnGRUreluRewardsList.append(drnnGRUreluRewards5)
drnnGRUreluRewardsList.append(drnnGRUreluRewards6)
drnnGRUreluRewardsList.append(drnnGRUreluRewards7)
drnnGRUreluRewardsList.append(drnnGRUreluRewards8)
drnnGRUreluRewardsList.append(drnnGRUreluRewards9)
drnnGRUreluRewardsList.append(drnnGRUreluRewards10)
drnnGRUreluRewardsList.append(drnnGRUreluRewards11)
drnnGRUreluRewardsList.append(drnnGRUreluRewards12)
drnnGRUreluRewardsList.append(drnnGRUreluRewards13)
drnnGRUreluRewardsList.append(drnnGRUreluRewards14)
drnnGRUreluRewardsList.append(drnnGRUreluRewards15)
drnnGRUreluRewardsList.append(drnnGRUreluRewards16)
drnnGRUreluRewardsList.append(drnnGRUreluRewards17)
drnnGRUreluRewardsList.append(drnnGRUreluRewards18)
drnnGRUreluRewardsList.append(drnnGRUreluRewards19)
drnnGRUreluRewardsList.append(drnnGRUreluRewards20)
drnnGRUreluRewardsList.append(drnnGRUreluRewards21)
drnnGRUreluRewardsList.append(drnnGRUreluRewards22)
drnnGRUreluRewardsList.append(drnnGRUreluRewards23)
drnnGRUreluRewardsList.append(drnnGRUreluRewards24)
drnnGRUreluRewardsList.append(drnnGRUreluRewards25)
drnnGRUreluRewardsList.append(drnnGRUreluRewards26)
drnnGRUreluRewardsList.append(drnnGRUreluRewards27)
drnnGRUreluRewardsList.append(drnnGRUreluRewards28)
drnnGRUreluRewardsList.append(drnnGRUreluRewards29)
drnnGRUreluRewardsList.append(drnnGRUreluRewards30)
drnnGRUreluRewardsList.append(drnnGRUreluRewards31)
drnnGRUreluRewardsList.append(drnnGRUreluRewards32)
drnnGRUreluRewardsList.append(drnnGRUreluRewards33)
drnnGRUreluRewardsList.append(drnnGRUreluRewards34)
drnnGRUreluRewardsList.append(drnnGRUreluRewards35)
drnnGRUreluRewardsList.append(drnnGRUreluRewards36)
drnnGRUreluRewardsList.append(drnnGRUreluRewards37)
drnnGRUreluRewardsList.append(drnnGRUreluRewards38)
drnnGRUreluRewardsList.append(drnnGRUreluRewards39)
drnnGRUreluRewardsList.append(drnnGRUreluRewards40)
drnnGRUreluRewardsList.append(drnnGRUreluRewards41)
drnnGRUreluRewardsList.append(drnnGRUreluRewards42)
drnnGRUreluRewardsList.append(drnnGRUreluRewards43)
drnnGRUreluRewardsList.append(drnnGRUreluRewards44)
drnnGRUreluRewardsList.append(drnnGRUreluRewards45)
drnnGRUreluRewardsList.append(drnnGRUreluRewards46)
drnnGRUreluRewardsList.append(drnnGRUreluRewards47)
drnnGRUreluRewardsList.append(drnnGRUreluRewards48)
drnnGRUreluRewardsList.append(drnnGRUreluRewards49)
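# Editor's sketch (hypothetical, not part of the original run): the fifty per-run
# lists above follow a regular naming pattern (drnnGRUreluMakespan0..49,
# drnnGRUreluRewards0..49), so the same aggregation could be written as a loop
# over a dynamic lookup, assuming all fifty variables exist at module scope:
#
#     for i in range(50):
#         drnnGRUreluMakespan.append(np.mean(globals()[f'drnnGRUreluMakespan{i}']))
#         drnnGRUreluRewards.append(np.mean(globals()[f'drnnGRUreluRewards{i}']))
#         drnnGRUreluMakespanList.append(globals()[f'drnnGRUreluMakespan{i}'])
#         drnnGRUreluRewardsList.append(globals()[f'drnnGRUreluRewards{i}'])
#
# The same pattern applies to the GRU/LSTM and feedforward blocks below.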
for vector in drnnGRUtanhMakespanList:
for element in vector:
drnnGRUtanhMakespanValues.append(element)
for vector in drnnGRUtanhRewardsList:
for element in vector:
drnnGRUtanhRewardsValues.append(element)
##################
for vector in drnnGRUreluMakespanList:
for element in vector:
drnnGRUreluMakespanValues.append(element)
for vector in drnnGRUreluRewardsList:
for element in vector:
drnnGRUreluRewardsValues.append(element)
#####################
smoothGRUtanhMakespanValues = pd.Series(drnnGRUtanhMakespanValues).rolling(12).mean()
plt.plot(smoothGRUtanhMakespanValues)
plt.xlabel("Episodios")
plt.ylabel("Segundos")
plt.title("'Makespan' con red neuronal profunda que incluye 1 capa GRU")
plt.show()
smoothGRUtanhRewardsValues = pd.Series(drnnGRUtanhRewardsValues).rolling(12).mean()
plt.plot(smoothGRUtanhRewardsValues)
plt.xlabel("Episodios")
plt.ylabel("Premio")
plt.title("'Reward' con red neuronal profunda que incluye 1 capa GRU")
plt.show()
#####################
smoothGRUreluMakespanValues = pd.Series(drnnGRUreluMakespanValues).rolling(12).mean()
plt.plot(smoothGRUreluMakespanValues)
plt.xlabel("Episodios")
plt.ylabel("Segundos")
plt.title("'Makespan' con red neuronal profunda que incluye 1 capa GRU y ReLU")
plt.show()
smoothGRUreluRewardsValues = pd.Series(drnnGRUreluRewardsValues).rolling(12).mean()
plt.plot(smoothGRUreluRewardsValues)
plt.xlabel("Episodios")
plt.ylabel("Premio")
plt.title("'Reward' con red neuronal profunda que incluye 1 capa GRU y ReLU")
plt.show()
###################
plt.plot(smoothGRUtanhMakespanValues, color='blue', label='tanh')
plt.plot(smoothGRUreluMakespanValues, color='orange', label='relu')
plt.xlabel("Episodios")
plt.ylabel("Segundos")
plt.title("'Makespan' con red neuronal profunda que incluye 1 capa GRU")
plt.legend()
plt.show()
###################
plt.plot(smoothGRUtanhRewardsValues, color='blue', label='tanh')
plt.plot(smoothGRUreluRewardsValues, color='orange', label='relu')
plt.xlabel("Episodios")
plt.ylabel("Premio")
plt.title("'Reward' con red neuronal profunda que incluye 1 capa GRU")
plt.legend()
plt.show()
###################
drnnLSTMtanhMakespan = []
drnnLSTMtanhRewards = []
drnnLSTMtanhMakespanList = []
drnnLSTMtanhRewardsList = []
drnnLSTMtanhMakespanValues = []
drnnLSTMtanhRewardsValues = []
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan0))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan1))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan2))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan3))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan4))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan5))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan6))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan7))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan8))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan9))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan10))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan11))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan12))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan13))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan14))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan15))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan16))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan17))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan18))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan19))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan20))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan21))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan22))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan23))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan24))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan25))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan26))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan27))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan28))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan29))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan30))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan31))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan32))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan33))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan34))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan35))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan36))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan37))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan38))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan39))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan40))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan41))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan42))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan43))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan44))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan45))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan46))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan47))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan48))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan49))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards0))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards1))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards2))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards3))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards4))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards5))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards6))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards7))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards8))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards9))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards10))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards11))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards12))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards13))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards14))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards15))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards16))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards17))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards18))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards19))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards20))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards21))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards22))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards23))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards24))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards25))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards26))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards27))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards28))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards29))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards30))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards31))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards32))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards33))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards34))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards35))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards36))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards37))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards38))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards39))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards40))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards41))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards42))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards43))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards44))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards45))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards46))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards47))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards48))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards49))
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan0)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan1)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan2)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan3)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan4)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan5)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan6)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan7)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan8)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan9)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan10)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan11)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan12)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan13)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan14)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan15)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan16)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan17)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan18)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan19)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan20)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan21)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan22)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan23)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan24)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan25)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan26)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan27)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan28)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan29)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan30)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan31)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan32)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan33)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan34)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan35)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan36)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan37)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan38)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan39)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan40)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan41)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan42)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan43)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan44)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan45)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan46)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan47)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan48)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan49)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards0)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards1)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards2)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards3)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards4)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards5)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards6)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards7)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards8)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards9)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards10)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards11)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards12)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards13)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards14)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards15)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards16)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards17)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards18)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards19)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards20)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards21)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards22)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards23)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards24)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards25)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards26)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards27)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards28)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards29)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards30)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards31)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards32)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards33)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards34)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards35)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards36)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards37)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards38)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards39)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards40)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards41)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards42)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards43)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards44)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards45)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards46)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards47)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards48)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards49)
for vector in drnnLSTMtanhMakespanList:
for element in vector:
drnnLSTMtanhMakespanValues.append(element)
for vector in drnnLSTMtanhRewardsList:
for element in vector:
drnnLSTMtanhRewardsValues.append(element)
smoothLSTMtanhMakespanValues = pd.Series(drnnLSTMtanhMakespanValues).rolling(12).mean()
plt.plot(smoothLSTMtanhMakespanValues)
plt.xlabel("Episodios")
plt.ylabel("Segundos")
plt.title("'Makespan' utilizando LSTM con tanh")
plt.show()
smoothLSTMtanhRewardsValues = pd.Series(drnnLSTMtanhRewardsValues).rolling(12).mean()
plt.plot(smoothLSTMtanhRewardsValues)
plt.xlabel("Episodios")
plt.ylabel("Premio")
plt.title("'Reward' utilizando LSTM con tanh")
plt.show()
####################
drnnLSTMreluMakespan = []
drnnLSTMreluRewards = []
drnnLSTMreluMakespanList = []
drnnLSTMreluRewardsList = []
drnnLSTMreluMakespanValues = []
drnnLSTMreluRewardsValues = []
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan0))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan1))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan2))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan3))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan4))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan5))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan6))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan7))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan8))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan9))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan10))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan11))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan12))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan13))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan14))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan15))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan16))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan17))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan18))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan19))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan20))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan21))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan22))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan23))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan24))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan25))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan26))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan27))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan28))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan29))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan30))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan31))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan32))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan33))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan34))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan35))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan36))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan37))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan38))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan39))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan40))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan41))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan42))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan43))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan44))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan45))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan46))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan47))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan48))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan49))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards0))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards1))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards2))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards3))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards4))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards5))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards6))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards7))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards8))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards9))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards10))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards11))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards12))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards13))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards14))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards15))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards16))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards17))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards18))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards19))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards20))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards21))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards22))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards23))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards24))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards25))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards26))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards27))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards28))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards29))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards30))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards31))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards32))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards33))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards34))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards35))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards36))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards37))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards38))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards39))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards40))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards41))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards42))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards43))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards44))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards45))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards46))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards47))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards48))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards49))
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan0)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan1)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan2)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan3)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan4)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan5)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan6)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan7)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan8)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan9)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan10)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan11)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan12)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan13)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan14)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan15)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan16)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan17)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan18)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan19)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan20)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan21)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan22)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan23)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan24)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan25)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan26)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan27)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan28)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan29)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan30)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan31)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan32)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan33)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan34)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan35)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan36)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan37)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan38)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan39)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan40)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan41)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan42)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan43)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan44)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan45)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan46)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan47)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan48)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan49)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards0)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards1)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards2)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards3)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards4)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards5)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards6)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards7)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards8)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards9)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards10)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards11)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards12)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards13)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards14)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards15)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards16)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards17)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards18)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards19)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards20)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards21)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards22)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards23)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards24)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards25)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards26)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards27)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards28)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards29)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards30)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards31)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards32)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards33)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards34)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards35)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards36)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards37)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards38)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards39)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards40)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards41)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards42)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards43)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards44)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards45)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards46)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards47)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards48)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards49)
for vector in drnnLSTMreluMakespanList:
for element in vector:
drnnLSTMreluMakespanValues.append(element)
for vector in drnnLSTMreluRewardsList:
for element in vector:
drnnLSTMreluRewardsValues.append(element)
smoothLSTMreluMakespanValues = pd.Series(drnnLSTMreluMakespanValues).rolling(12).mean()
plt.plot(smoothLSTMreluMakespanValues)
plt.xlabel("Episodios")
plt.ylabel("Segundos")
plt.title("'Makespan' utilizando LSTM con relu")
plt.show()
smoothLSTMreluRewardsValues = pd.Series(drnnLSTMreluRewardsValues).rolling(12).mean()
plt.plot(smoothLSTMreluRewardsValues)
plt.xlabel("Episodios")
plt.ylabel("Premio")
plt.title("'Reward' utilizando LSTM con relu")
plt.show()
##################
plt.plot(smoothLSTMtanhMakespanValues, color='blue', label='tanh')
plt.plot(smoothLSTMreluMakespanValues, color='orange', label='relu')
plt.xlabel("Episodios")
plt.ylabel("Segundos")
plt.title("'Makespan' con red neuronal profunda que incluye 1 capa LSTM")
plt.legend()
plt.show()
##################
plt.plot(smoothLSTMtanhRewardsValues, color='blue', label='tanh')
plt.plot(smoothLSTMreluRewardsValues, color='orange', label='relu')
plt.xlabel("Episodios")
plt.ylabel("Premio")
plt.title("'Reward' con red neuronal profunda que incluye 1 capa LSTM")
plt.legend()
plt.show()
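# Editor's sketch (hedged, illustrative names only): every smoothing-and-plot step
# in this script repeats the same rolling-mean recipe, so a small helper could be
# used instead of the copy-pasted blocks:
#
#     def plot_smoothed(values, ylabel, title, window=12):
#         smoothed = pd.Series(values).rolling(window).mean()
#         plt.plot(smoothed)
#         plt.xlabel("Episodios")
#         plt.ylabel(ylabel)
#         plt.title(title)
#         plt.show()
#         return smoothed
#
#     smoothLSTMtanhMakespanValues = plot_smoothed(drnnLSTMtanhMakespanValues,
#                                                  "Segundos",
#                                                  "'Makespan' utilizando LSTM con tanh")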
##################
##################
##################
drlTanhMakespan = []
drlTanhRewards = []
drlTanhMakespanList = []
drlTanhRewardsList = []
drlTanhMakespanValues = []
drlTanhRewardsValues = []
drlTanhMakespan.append(np.mean(drlTanhMakespan0))
drlTanhMakespan.append(np.mean(drlTanhMakespan1))
drlTanhMakespan.append(np.mean(drlTanhMakespan2))
drlTanhMakespan.append(np.mean(drlTanhMakespan3))
drlTanhMakespan.append(np.mean(drlTanhMakespan4))
drlTanhMakespan.append(np.mean(drlTanhMakespan5))
drlTanhMakespan.append(np.mean(drlTanhMakespan6))
drlTanhMakespan.append(np.mean(drlTanhMakespan7))
drlTanhMakespan.append(np.mean(drlTanhMakespan8))
drlTanhMakespan.append(np.mean(drlTanhMakespan9))
drlTanhMakespan.append(np.mean(drlTanhMakespan10))
drlTanhMakespan.append(np.mean(drlTanhMakespan11))
drlTanhMakespan.append(np.mean(drlTanhMakespan12))
drlTanhMakespan.append(np.mean(drlTanhMakespan13))
drlTanhMakespan.append(np.mean(drlTanhMakespan14))
drlTanhMakespan.append(np.mean(drlTanhMakespan15))
drlTanhMakespan.append(np.mean(drlTanhMakespan16))
drlTanhMakespan.append(np.mean(drlTanhMakespan17))
drlTanhMakespan.append(np.mean(drlTanhMakespan18))
drlTanhMakespan.append(np.mean(drlTanhMakespan19))
drlTanhMakespan.append(np.mean(drlTanhMakespan20))
drlTanhMakespan.append(np.mean(drlTanhMakespan21))
drlTanhMakespan.append(np.mean(drlTanhMakespan22))
drlTanhMakespan.append(np.mean(drlTanhMakespan23))
drlTanhMakespan.append(np.mean(drlTanhMakespan24))
drlTanhMakespan.append(np.mean(drlTanhMakespan25))
drlTanhMakespan.append(np.mean(drlTanhMakespan26))
drlTanhMakespan.append(np.mean(drlTanhMakespan27))
drlTanhMakespan.append(np.mean(drlTanhMakespan28))
drlTanhMakespan.append(np.mean(drlTanhMakespan29))
drlTanhMakespan.append(np.mean(drlTanhMakespan30))
drlTanhMakespan.append(np.mean(drlTanhMakespan31))
drlTanhMakespan.append(np.mean(drlTanhMakespan32))
drlTanhMakespan.append(np.mean(drlTanhMakespan33))
drlTanhMakespan.append(np.mean(drlTanhMakespan34))
drlTanhMakespan.append(np.mean(drlTanhMakespan35))
drlTanhMakespan.append(np.mean(drlTanhMakespan36))
drlTanhMakespan.append(np.mean(drlTanhMakespan37))
drlTanhMakespan.append(np.mean(drlTanhMakespan38))
drlTanhMakespan.append(np.mean(drlTanhMakespan39))
drlTanhMakespan.append(np.mean(drlTanhMakespan40))
drlTanhMakespan.append(np.mean(drlTanhMakespan41))
drlTanhMakespan.append(np.mean(drlTanhMakespan42))
drlTanhMakespan.append(np.mean(drlTanhMakespan43))
drlTanhMakespan.append(np.mean(drlTanhMakespan44))
drlTanhMakespan.append(np.mean(drlTanhMakespan45))
drlTanhMakespan.append(np.mean(drlTanhMakespan46))
drlTanhMakespan.append(np.mean(drlTanhMakespan47))
drlTanhMakespan.append(np.mean(drlTanhMakespan48))
drlTanhMakespan.append(np.mean(drlTanhMakespan49))
drlTanhRewards.append(np.mean(drlTanhRewards0))
drlTanhRewards.append(np.mean(drlTanhRewards1))
drlTanhRewards.append(np.mean(drlTanhRewards2))
drlTanhRewards.append(np.mean(drlTanhRewards3))
drlTanhRewards.append(np.mean(drlTanhRewards4))
drlTanhRewards.append(np.mean(drlTanhRewards5))
drlTanhRewards.append(np.mean(drlTanhRewards6))
drlTanhRewards.append(np.mean(drlTanhRewards7))
drlTanhRewards.append(np.mean(drlTanhRewards8))
drlTanhRewards.append(np.mean(drlTanhRewards9))
drlTanhRewards.append(np.mean(drlTanhRewards10))
drlTanhRewards.append(np.mean(drlTanhRewards11))
drlTanhRewards.append(np.mean(drlTanhRewards12))
drlTanhRewards.append(np.mean(drlTanhRewards13))
drlTanhRewards.append(np.mean(drlTanhRewards14))
drlTanhRewards.append(np.mean(drlTanhRewards15))
drlTanhRewards.append(np.mean(drlTanhRewards16))
drlTanhRewards.append(np.mean(drlTanhRewards17))
drlTanhRewards.append(np.mean(drlTanhRewards18))
drlTanhRewards.append(np.mean(drlTanhRewards19))
drlTanhRewards.append(np.mean(drlTanhRewards20))
drlTanhRewards.append(np.mean(drlTanhRewards21))
drlTanhRewards.append(np.mean(drlTanhRewards22))
drlTanhRewards.append(np.mean(drlTanhRewards23))
drlTanhRewards.append(np.mean(drlTanhRewards24))
drlTanhRewards.append(np.mean(drlTanhRewards25))
drlTanhRewards.append(np.mean(drlTanhRewards26))
drlTanhRewards.append(np.mean(drlTanhRewards27))
drlTanhRewards.append(np.mean(drlTanhRewards28))
drlTanhRewards.append(np.mean(drlTanhRewards29))
drlTanhRewards.append(np.mean(drlTanhRewards30))
drlTanhRewards.append(np.mean(drlTanhRewards31))
drlTanhRewards.append(np.mean(drlTanhRewards32))
drlTanhRewards.append(np.mean(drlTanhRewards33))
drlTanhRewards.append(np.mean(drlTanhRewards34))
drlTanhRewards.append(np.mean(drlTanhRewards35))
drlTanhRewards.append(np.mean(drlTanhRewards36))
drlTanhRewards.append(np.mean(drlTanhRewards37))
drlTanhRewards.append(np.mean(drlTanhRewards38))
drlTanhRewards.append(np.mean(drlTanhRewards39))
drlTanhRewards.append(np.mean(drlTanhRewards40))
drlTanhRewards.append(np.mean(drlTanhRewards41))
drlTanhRewards.append(np.mean(drlTanhRewards42))
drlTanhRewards.append(np.mean(drlTanhRewards43))
drlTanhRewards.append(np.mean(drlTanhRewards44))
drlTanhRewards.append(np.mean(drlTanhRewards45))
drlTanhRewards.append(np.mean(drlTanhRewards46))
drlTanhRewards.append(np.mean(drlTanhRewards47))
drlTanhRewards.append(np.mean(drlTanhRewards48))
drlTanhRewards.append(np.mean(drlTanhRewards49))
drlTanhMakespanList.append(drlTanhMakespan0)
drlTanhMakespanList.append(drlTanhMakespan1)
drlTanhMakespanList.append(drlTanhMakespan2)
drlTanhMakespanList.append(drlTanhMakespan3)
drlTanhMakespanList.append(drlTanhMakespan4)
drlTanhMakespanList.append(drlTanhMakespan5)
drlTanhMakespanList.append(drlTanhMakespan6)
drlTanhMakespanList.append(drlTanhMakespan7)
drlTanhMakespanList.append(drlTanhMakespan8)
drlTanhMakespanList.append(drlTanhMakespan9)
drlTanhMakespanList.append(drlTanhMakespan10)
drlTanhMakespanList.append(drlTanhMakespan11)
drlTanhMakespanList.append(drlTanhMakespan12)
drlTanhMakespanList.append(drlTanhMakespan13)
drlTanhMakespanList.append(drlTanhMakespan14)
drlTanhMakespanList.append(drlTanhMakespan15)
drlTanhMakespanList.append(drlTanhMakespan16)
drlTanhMakespanList.append(drlTanhMakespan17)
drlTanhMakespanList.append(drlTanhMakespan18)
drlTanhMakespanList.append(drlTanhMakespan19)
drlTanhMakespanList.append(drlTanhMakespan20)
drlTanhMakespanList.append(drlTanhMakespan21)
drlTanhMakespanList.append(drlTanhMakespan22)
drlTanhMakespanList.append(drlTanhMakespan23)
drlTanhMakespanList.append(drlTanhMakespan24)
drlTanhMakespanList.append(drlTanhMakespan25)
drlTanhMakespanList.append(drlTanhMakespan26)
drlTanhMakespanList.append(drlTanhMakespan27)
drlTanhMakespanList.append(drlTanhMakespan28)
drlTanhMakespanList.append(drlTanhMakespan29)
drlTanhMakespanList.append(drlTanhMakespan30)
drlTanhMakespanList.append(drlTanhMakespan31)
drlTanhMakespanList.append(drlTanhMakespan32)
drlTanhMakespanList.append(drlTanhMakespan33)
drlTanhMakespanList.append(drlTanhMakespan34)
drlTanhMakespanList.append(drlTanhMakespan35)
drlTanhMakespanList.append(drlTanhMakespan36)
drlTanhMakespanList.append(drlTanhMakespan37)
drlTanhMakespanList.append(drlTanhMakespan38)
drlTanhMakespanList.append(drlTanhMakespan39)
drlTanhMakespanList.append(drlTanhMakespan40)
drlTanhMakespanList.append(drlTanhMakespan41)
drlTanhMakespanList.append(drlTanhMakespan42)
drlTanhMakespanList.append(drlTanhMakespan43)
drlTanhMakespanList.append(drlTanhMakespan44)
drlTanhMakespanList.append(drlTanhMakespan45)
drlTanhMakespanList.append(drlTanhMakespan46)
drlTanhMakespanList.append(drlTanhMakespan47)
drlTanhMakespanList.append(drlTanhMakespan48)
drlTanhMakespanList.append(drlTanhMakespan49)
drlTanhRewardsList.append(drlTanhRewards0)
drlTanhRewardsList.append(drlTanhRewards1)
drlTanhRewardsList.append(drlTanhRewards2)
drlTanhRewardsList.append(drlTanhRewards3)
drlTanhRewardsList.append(drlTanhRewards4)
drlTanhRewardsList.append(drlTanhRewards5)
drlTanhRewardsList.append(drlTanhRewards6)
drlTanhRewardsList.append(drlTanhRewards7)
drlTanhRewardsList.append(drlTanhRewards8)
drlTanhRewardsList.append(drlTanhRewards9)
drlTanhRewardsList.append(drlTanhRewards10)
drlTanhRewardsList.append(drlTanhRewards11)
drlTanhRewardsList.append(drlTanhRewards12)
drlTanhRewardsList.append(drlTanhRewards13)
drlTanhRewardsList.append(drlTanhRewards14)
drlTanhRewardsList.append(drlTanhRewards15)
drlTanhRewardsList.append(drlTanhRewards16)
drlTanhRewardsList.append(drlTanhRewards17)
drlTanhRewardsList.append(drlTanhRewards18)
drlTanhRewardsList.append(drlTanhRewards19)
drlTanhRewardsList.append(drlTanhRewards20)
drlTanhRewardsList.append(drlTanhRewards21)
drlTanhRewardsList.append(drlTanhRewards22)
drlTanhRewardsList.append(drlTanhRewards23)
drlTanhRewardsList.append(drlTanhRewards24)
drlTanhRewardsList.append(drlTanhRewards25)
drlTanhRewardsList.append(drlTanhRewards26)
drlTanhRewardsList.append(drlTanhRewards27)
drlTanhRewardsList.append(drlTanhRewards28)
drlTanhRewardsList.append(drlTanhRewards29)
drlTanhRewardsList.append(drlTanhRewards30)
drlTanhRewardsList.append(drlTanhRewards31)
drlTanhRewardsList.append(drlTanhRewards32)
drlTanhRewardsList.append(drlTanhRewards33)
drlTanhRewardsList.append(drlTanhRewards34)
drlTanhRewardsList.append(drlTanhRewards35)
drlTanhRewardsList.append(drlTanhRewards36)
drlTanhRewardsList.append(drlTanhRewards37)
drlTanhRewardsList.append(drlTanhRewards38)
drlTanhRewardsList.append(drlTanhRewards39)
drlTanhRewardsList.append(drlTanhRewards40)
drlTanhRewardsList.append(drlTanhRewards41)
drlTanhRewardsList.append(drlTanhRewards42)
drlTanhRewardsList.append(drlTanhRewards43)
drlTanhRewardsList.append(drlTanhRewards44)
drlTanhRewardsList.append(drlTanhRewards45)
drlTanhRewardsList.append(drlTanhRewards46)
drlTanhRewardsList.append(drlTanhRewards47)
drlTanhRewardsList.append(drlTanhRewards48)
drlTanhRewardsList.append(drlTanhRewards49)
for vector in drlTanhMakespanList:
for element in vector:
drlTanhMakespanValues.append(element)
for vector in drlTanhRewardsList:
for element in vector:
drlTanhRewardsValues.append(element)
smoothdrlTanhMakespanValues = pd.Series(drlTanhMakespanValues).rolling(12).mean()
plt.plot(smoothdrlTanhMakespanValues)
plt.xlabel("Episodios")
plt.ylabel("Segundos")
plt.title("'Makespan' utilizando feedforward con tanh")
plt.show()
smoothdrlTanhRewardsValues = pd.Series(drlTanhRewardsValues).rolling(12).mean()
plt.plot(smoothdrlTanhRewardsValues)
plt.xlabel("Episodios")
plt.ylabel("Premio")
plt.title("'Reward' utilizando feedforward con tanh")
plt.show()
####################
drlReluMakespan = []
drlReluRewards = []
drlReluMakespanList = []
drlReluRewardsList = []
drlReluMakespanValues = []
drlReluRewardsValues = []
drlReluMakespan.append(np.mean(drlReluMakespan0))
drlReluMakespan.append(np.mean(drlReluMakespan1))
drlReluMakespan.append(np.mean(drlReluMakespan2))
drlReluMakespan.append(np.mean(drlReluMakespan3))
drlReluMakespan.append(np.mean(drlReluMakespan4))
drlReluMakespan.append(np.mean(drlReluMakespan5))
drlReluMakespan.append(np.mean(drlReluMakespan6))
drlReluMakespan.append(np.mean(drlReluMakespan7))
drlReluMakespan.append(np.mean(drlReluMakespan8))
drlReluMakespan.append(np.mean(drlReluMakespan9))
drlReluMakespan.append(np.mean(drlReluMakespan10))
drlReluMakespan.append(np.mean(drlReluMakespan11))
drlReluMakespan.append(np.mean(drlReluMakespan12))
drlReluMakespan.append(np.mean(drlReluMakespan13))
drlReluMakespan.append(np.mean(drlReluMakespan14))
drlReluMakespan.append(np.mean(drlReluMakespan15))
drlReluMakespan.append(np.mean(drlReluMakespan16))
drlReluMakespan.append(np.mean(drlReluMakespan17))
drlReluMakespan.append(np.mean(drlReluMakespan18))
drlReluMakespan.append(np.mean(drlReluMakespan19))
drlReluMakespan.append(np.mean(drlReluMakespan20))
drlReluMakespan.append(np.mean(drlReluMakespan21))
drlReluMakespan.append(np.mean(drlReluMakespan22))
drlReluMakespan.append(np.mean(drlReluMakespan23))
import pickle
import numpy as np
import pandas as pd
# Separating out AUC and specificity from pickle object
def auc_lst(test_nos, dset_lst):
metric_lst = []
auc_lst = []
spec_lst = []
for i, num in enumerate(test_nos):
with open(f'../results/logs/{num}/log_Test{num}_roc_plt_lst.pkl', 'rb') as f:
metric_lst.append(pickle.load(f))
for set_i, set_name in enumerate(dset_lst):
for i, roc in enumerate(metric_lst):
for index, thruple in enumerate(roc[set_i:set_i + 1]):
_, _, a_u_c, _, specificity = thruple
auc_lst.append(a_u_c)
spec_lst.append(specificity)
return auc_lst, spec_lst
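# Note: with two dataset names and six test numbers, the lists returned above are
# ordered dataset-major, i.e. entries [0:6] correspond to the first dataset
# ('Heid_Blank') and entries [6:12] to the second ('Heid_Marked'). The [0:6]/[6:]
# slicing in the summary below relies on this ordering.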
if __name__ == '__main__':
# Surgical markings
# Getting AUC values for each random seed of each model type (replace test numbers as necessary)
AUC_baseline, spec_baseline = auc_lst([249, 253, 257, 261, 265, 269], ['Heid_Blank', 'Heid_Marked'])
AUC_LNTL, spec_LNTL = auc_lst([228, 254, 258, 262, 266, 270], ['Heid_Blank', 'Heid_Marked'])
AUC_TABE, spec_TABE = auc_lst([229, 255, 259, 263, 267, 271], ['Heid_Blank', 'Heid_Marked'])
AUC_CLGR, spec_CLGR = auc_lst([230, 256, 260, 264, 268, 272], ['Heid_Blank', 'Heid_Marked'])
# Getting mean and standard deviation of AUC and specificity for baseline, LNTL, TABE and CLGR models
# Tested on unbiased images
df_plain = pd.DataFrame([[np.mean(AUC_baseline[0:6]), np.std(AUC_baseline[0:6]), np.mean(spec_baseline[0:6]), np.std(spec_baseline[0:6])],
[np.mean(AUC_LNTL[0:6]), np.std(AUC_LNTL[0:6]), np.mean(spec_LNTL[0:6]), np.std(spec_LNTL[0:6])],
[np.mean(AUC_TABE[0:6]), np.std(AUC_TABE[0:6]), np.mean(spec_TABE[0:6]), np.std(spec_TABE[0:6])],
[np.mean(AUC_CLGR[0:6]), np.std(AUC_CLGR[0:6]), np.mean(spec_CLGR[0:6]), np.std(spec_CLGR[0:6])]],
columns=['AUC_mean', 'AUC_std', 'specificity_mean', 'specificity_std'])
df_plain['test'] = 'plain'
# tested on biased images (marked)
df_marked = pd.DataFrame([[np.mean(AUC_baseline[6:]), np.std(AUC_baseline[6:]), np.mean(spec_baseline[6:]), np.std(spec_baseline[6:])],
[np.mean(AUC_LNTL[6:]), np.std(AUC_LNTL[6:]), np.mean(spec_LNTL[6:]), np.std(spec_LNTL[6:])],
[np.mean(AUC_TABE[6:]), np.std(AUC_TABE[6:]), np.mean(spec_TABE[6:]), np.std(spec_TABE[6:])],
                              [np.mean(AUC_CLGR[6:]), np.std(AUC_CLGR[6:]), np.mean(spec_CLGR[6:]), np.std(spec_CLGR[6:])]],
                             columns=['AUC_mean', 'AUC_std', 'specificity_mean', 'specificity_std'])
################################################################################
# Copyright (C) 2013-2014 <NAME>
#
# This file is licensed under the MIT License.
################################################################################
"""
Unit tests for gaussian_markov_chain module.
"""
import numpy as np
from ..gaussian_markov_chain import GaussianMarkovChain
from ..gaussian_markov_chain import VaryingGaussianMarkovChain
from ..gaussian import Gaussian, GaussianMoments
from ..gaussian import GaussianARD
from ..gaussian import GaussianGamma
from ..wishart import Wishart, WishartMoments
from ..gamma import Gamma, GammaMoments
from bayespy.utils import random
from bayespy.utils import linalg
from bayespy.utils import misc
from bayespy.utils.misc import TestCase
def kalman_filter(y, U, A, V, mu0, Cov0, out=None):
"""
Perform Kalman filtering to obtain filtered mean and covariance.
The parameters of the process may vary in time, thus they are
given as iterators instead of fixed values.
Parameters
----------
y : (N,D) array
"Normalized" noisy observations of the states, that is, the
observations multiplied by the precision matrix U (and possibly
other transformation matrices).
U : (N,D,D) array or N-list of (D,D) arrays
Precision matrix (i.e., inverse covariance matrix) of the observation
noise for each time instance.
A : (N-1,D,D) array or (N-1)-list of (D,D) arrays
Dynamic matrix for each time instance.
    V : (N-1,D,D) array or (N-1)-list of (D,D) arrays
        Covariance matrix of the innovation noise for each time instance.
    mu0 : (D,) array
        Prior mean of the initial state.
    Cov0 : (D,D) array
        Prior covariance of the initial state.
Returns
-------
mu : array
Filtered mean of the states.
Cov : array
Filtered covariance of the states.
See also
--------
rts_smoother
"""
mu = mu0
Cov = Cov0
# Allocate memory for the results
(N,D) = np.shape(y)
X = np.empty((N,D))
CovX = np.empty((N,D,D))
# Update step for t=0
M = np.dot(np.dot(Cov, U[0]), Cov) + Cov
L = linalg.chol(M)
mu = np.dot(Cov, linalg.chol_solve(L, np.dot(Cov,y[0]) + mu))
Cov = np.dot(Cov, linalg.chol_solve(L, Cov))
X[0,:] = mu
CovX[0,:,:] = Cov
    for n in range(len(y)-1):
# Prediction step
mu = np.dot(A[n], mu)
Cov = np.dot(np.dot(A[n], Cov), A[n].T) + V[n]
# Update step
M = np.dot(np.dot(Cov, U[n+1]), Cov) + Cov
L = linalg.chol(M)
mu = np.dot(Cov, linalg.chol_solve(L, np.dot(Cov,y[n+1]) + mu))
Cov = np.dot(Cov, linalg.chol_solve(L, Cov))
# Force symmetric covariance (for numeric inaccuracy)
Cov = 0.5*Cov + 0.5*Cov.T
# Store results
X[n+1,:] = mu
CovX[n+1,:,:] = Cov
return (X, CovX)
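# Hedged usage sketch (not part of the test suite; all values below are synthetic
# and time-invariant, chosen only to illustrate the expected argument shapes):
#
#     D, N = 2, 100
#     A = [0.9 * np.identity(D)] * (N - 1)
#     V = [0.1 * np.identity(D)] * (N - 1)
#     U = [np.identity(D)] * N
#     y = np.random.randn(N, D)   # with U = I the "normalized" and raw observations coincide
#     (m_f, Cov_f) = kalman_filter(y, U, A, V, np.zeros(D), np.identity(D))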
def rts_smoother(mu, Cov, A, V, removethis=None):
"""
Perform Rauch-Tung-Striebel smoothing to obtain the posterior.
The function returns the posterior mean and covariance of each
state. The parameters of the process may vary in time, thus they
are given as iterators instead of fixed values.
Parameters
----------
mu : (N,D) array
Mean of the states from Kalman filter.
Cov : (N,D,D) array
Covariance of the states from Kalman filter.
A : (N-1,D,D) array or (N-1)-list of (D,D) arrays
Dynamic matrix for each time instance.
V : (N-1,D,D) array or (N-1)-list of (D,D) arrays
Covariance matrix of the innovation noise for each time instance.
Returns
-------
mu : array
Posterior mean of the states.
Cov : array
Posterior covariance of the states.
See also
--------
kalman_filter
"""
N = len(mu)
    # Start from the last time instance and smoothen backwards
    x = mu[-1,:]
    Covx = Cov[-1,:,:]
    for n in reversed(range(N-1)):
# The predicted value of n
x_p = np.dot(A[n], mu[n,:])
Cov_p = np.dot(np.dot(A[n], Cov[n,:,:]), A[n].T) + V[n]
# Temporary variable
S = np.linalg.solve(Cov_p, np.dot(A[n], Cov[n,:,:]))
# Smoothed value of n
x = mu[n,:] + np.dot(S.T, x-x_p)
Covx = Cov[n,:,:] + np.dot(np.dot(S.T, Covx-Cov_p), S)
# Force symmetric covariance (for numeric inaccuracy)
Covx = 0.5*Covx + 0.5*Covx.T
# Store results
mu[n,:] = x
        Cov[n,:,:] = Covx
return (mu, Cov)
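# Hedged sketch: the smoother consumes the filtered estimates (and overwrites them
# in place), so a typical call chain with the same synthetic A, V, U, y as sketched
# above would be
#
#     (m_f, Cov_f) = kalman_filter(y, U, A, V, np.zeros(D), np.identity(D))
#     (m_s, Cov_s) = rts_smoother(m_f, Cov_f, A, V)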
class TestGaussianMarkovChain(TestCase):
def create_model(self, N, D):
# Construct the model
Mu = Gaussian(np.random.randn(D),
np.identity(D))
Lambda = Wishart(D,
random.covariance(D))
A = Gaussian(np.random.randn(D,D),
np.identity(D))
V = Gamma(D,
np.random.rand(D))
X = GaussianMarkovChain(Mu, Lambda, A, V, n=N)
Y = Gaussian(X, np.identity(D))
return (Y, X, Mu, Lambda, A, V)
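    # Note: create_model builds a linear-Gaussian state-space model
    #   X_0 ~ N(Mu, Lambda^{-1}),  X_n = A X_{n-1} + noise with precision diag(V),
    #   Y_n = X_n + unit-variance observation noise,
    # which is the structure exercised by the tests below.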
def test_plates(self):
"""
Test that plates are handled correctly.
"""
def test_message_to_mu0(self):
pass
def test_message_to_Lambda0(self):
pass
def test_message_to_A(self):
pass
def test_message_to_v(self):
pass
def test_message_to_parents(self):
""" Check gradient passed to inputs parent node """
N = 3
D = 2
Mu = Gaussian(np.random.randn(D), random.covariance(D))
Lambda = Wishart(D, random.covariance(D))
A = Gaussian(np.random.randn(D,D), random.covariance(D))
V = Gamma(D, np.random.rand(D))
X = GaussianMarkovChain(Mu, Lambda, A, V, n=N+1)
Y = Gaussian(X, random.covariance(D))
self.assert_moments(
X,
postprocess=lambda u: [
u[0],
u[1] + linalg.transpose(u[1], ndim=1),
u[2]
]
)
Y.observe(np.random.randn(N+1, D))
self.assert_message_to_parent(X, Mu, eps=1e-8)
self.assert_message_to_parent(
X,
Lambda,
eps=1e-8,
postprocess=lambda u: [
u[0] + linalg.transpose(u[0], ndim=1),
u[1],
]
)
self.assert_message_to_parent(X, A)
self.assert_message_to_parent(X, V, eps=1e-10, atol=1e-5)
pass
def test_message_to_parents_with_inputs(self):
""" Check gradient passed to inputs parent node """
def check(Mu, Lambda, A, V, U):
X = GaussianMarkovChain(Mu, Lambda, A, V, inputs=U)
Y = Gaussian(X, random.covariance(D))
# Check moments
self.assert_moments(
X,
postprocess=lambda u: [
u[0],
u[1] + linalg.transpose(u[1], ndim=1),
u[2]
]
)
Y.observe(np.random.randn(N+1, D))
X.update()
# Check gradient messages to parents
self.assert_message_to_parent(X, Mu)
self.assert_message_to_parent(
X,
Lambda,
postprocess=lambda phi: [
phi[0] + linalg.transpose(phi[0], ndim=1),
phi[1]
]
)
self.assert_message_to_parent(
X,
A,
postprocess=lambda phi: [
phi[0],
phi[1] + linalg.transpose(phi[1], ndim=1),
]
)
self.assert_message_to_parent(X, V)
self.assert_message_to_parent(X, U)
N = 4
D = 2
K = 3
check(
Gaussian(
np.random.randn(D),
random.covariance(D)
),
Wishart(
D,
random.covariance(D)
),
Gaussian(
np.random.randn(D,D+K),
random.covariance(D+K)
),
Gamma(
D,
np.random.rand(D)
),
Gaussian(
np.random.randn(N,K),
random.covariance(K)
)
)
check(
Gaussian(
np.random.randn(D),
random.covariance(D)
),
Wishart(
D,
random.covariance(D)
),
GaussianGamma(
np.random.randn(D,D+K),
random.covariance(D+K),
D,
np.random.rand(D),
ndim=1
),
Gamma(
D,
np.random.rand(D)
),
Gaussian(
np.random.randn(N,K),
random.covariance(K)
)
)
pass
def test_message_to_child(self):
"""
Test the updating of GaussianMarkovChain.
Check that the moments and the lower bound contribution are computed
correctly.
"""
# TODO: Add plates and missing values!
# Dimensionalities
D = 3
N = 5
(Y, X, Mu, Lambda, A, V) = self.create_model(N, D)
# Inference with arbitrary observations
y = np.random.randn(N,D)
Y.observe(y)
X.update()
(x_vb, xnxn_vb, xpxn_vb) = X.get_moments()
# Get parameter moments
(mu0, mumu0) = Mu.get_moments()
(icov0, logdet0) = Lambda.get_moments()
(a, aa) = A.get_moments()
(icov_x, logdetx) = V.get_moments()
icov_x = np.diag(icov_x)
# Prior precision
Z = np.einsum('...kij,...kk->...ij', aa, icov_x)
U_diag = [icov0+Z] + (N-2)*[icov_x+Z] + [icov_x]
U_super = (N-1) * [-np.dot(a.T, icov_x)]
U = misc.block_banded(U_diag, U_super)
# Prior mean
mu_prior = np.zeros(D*N)
mu_prior[:D] = np.dot(icov0,mu0)
# Data
Cov = np.linalg.inv(U + np.identity(D*N))
mu = np.dot(Cov, mu_prior + y.flatten())
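        # (With unit observation precision, the joint posterior over the chain has
        # precision U + I and mean (U + I)^{-1} (mu_prior + y), where mu_prior is
        # the precision-weighted prior mean constructed above.)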
# Moments
xx = mu[:,np.newaxis]*mu[np.newaxis,:] + Cov
mu = np.reshape(mu, (N,D))
xx = np.reshape(xx, (N,D,N,D))
# Check results
self.assertAllClose(x_vb, mu,
msg="Incorrect mean")
for n in range(N):
self.assertAllClose(xnxn_vb[n,:,:], xx[n,:,n,:],
msg="Incorrect second moment")
for n in range(N-1):
self.assertAllClose(xpxn_vb[n,:,:], xx[n,:,n+1,:],
msg="Incorrect lagged second moment")
# Compute the entropy H(X)
ldet = linalg.logdet_cov(Cov)
H = random.gaussian_entropy(-ldet, N*D)
# Compute <log p(X|...)>
xx = np.reshape(xx, (N*D, N*D))
mu = np.reshape(mu, (N*D,))
ldet = -logdet0 - np.sum(np.ones((N-1,D))*logdetx)
P = random.gaussian_logpdf(np.einsum('...ij,...ij',
xx,
U),
np.einsum('...i,...i',
mu,
mu_prior),
np.einsum('...ij,...ij',
mumu0,
icov0),
-ldet,
N*D)
# The VB bound from the net
l = X.lower_bound_contribution()
self.assertAllClose(l, H+P)
# Compute the true bound <log p(X|...)> + H(X)
#
# Simple tests
#
def check(N, D, plates=None, mu=None, Lambda=None, A=None, V=None):
if mu is None:
mu = np.random.randn(D)
if Lambda is None:
Lambda = random.covariance(D)
if A is None:
A = np.random.randn(D,D)
if V is None:
V = np.random.rand(D)
X = GaussianMarkovChain(mu,
Lambda,
A,
V,
plates=plates,
n=N)
(u0, u1, u2) = X._message_to_child()
(mu, mumu) = Gaussian._ensure_moments(mu, GaussianMoments, ndim=1).get_moments()
(Lambda, _) = Wishart._ensure_moments(Lambda, WishartMoments, ndim=1).get_moments()
(a, aa) = Gaussian._ensure_moments(A, GaussianMoments, ndim=1).get_moments()
a = a * np.ones((N-1,D,D)) # explicit broadcasting for simplicity
aa = aa * np.ones((N-1,D,D,D)) # explicit broadcasting for simplicity
(v, _) = Gamma._ensure_moments(V, GammaMoments).get_moments()
v = v * np.ones((N-1,D))
plates_C = X.plates
plates_mu = X.plates
C = np.zeros(plates_C + (N,D,N,D))
plates_mu = np.shape(mu)[:-1]
m = np.zeros(plates_mu + (N,D))
m[...,0,:] = np.einsum('...ij,...j->...i', Lambda, mu)
C[...,0,:,0,:] = Lambda + np.einsum('...dij,...d->...ij',
aa[...,0,:,:,:],
v[...,0,:])
for n in range(1,N-1):
C[...,n,:,n,:] = (np.einsum('...dij,...d->...ij',
aa[...,n,:,:,:],
v[...,n,:])
+ v[...,n,:,None] * np.identity(D))
for n in range(N-1):
C[...,n,:,n+1,:] = -np.einsum('...di,...d->...id',
a[...,n,:,:],
v[...,n,:])
C[...,n+1,:,n,:] = -np.einsum('...di,...d->...di',
a[...,n,:,:],
v[...,n,:])
C[...,-1,:,-1,:] = v[...,-1,:,None]*np.identity(D)
C = np.reshape(C, plates_C+(N*D,N*D))
Cov = np.linalg.inv(C)
Cov = np.reshape(Cov, plates_C+(N,D,N,D))
m0 = np.einsum('...minj,...nj->...mi', Cov, m)
m1 = np.zeros(plates_C+(N,D,D))
m2 = np.zeros(plates_C+(N-1,D,D))
for n in range(N):
m1[...,n,:,:] = Cov[...,n,:,n,:] + np.einsum('...i,...j->...ij',
m0[...,n,:],
m0[...,n,:])
for n in range(N-1):
m2[...,n,:,:] = Cov[...,n,:,n+1,:] + np.einsum('...i,...j->...ij',
m0[...,n,:],
m0[...,n+1,:])
self.assertAllClose(m0, u0*np.ones(np.shape(m0)))
self.assertAllClose(m1, u1*np.ones(np.shape(m1)))
self.assertAllClose(m2, u2*np.ones(np.shape(m2)))
pass
check(4,1)
check(4,3)
#
# Test mu
#
# Simple
check(4,3,
mu=Gaussian(np.random.randn(3),
random.covariance(3)))
# Plates
check(4,3,
mu=Gaussian(np.random.randn(5,6,3),
random.covariance(3),
plates=(5,6)))
# Plates with moments broadcasted over plates
check(4,3,
mu=Gaussian(np.random.randn(3),
random.covariance(3),
plates=(5,)))
check(4,3,
mu=Gaussian(np.random.randn(1,3),
random.covariance(3),
plates=(5,)))
# Plates broadcasting
check(4,3,
plates=(5,),
mu=Gaussian(np.random.randn(3),
random.covariance(3),
plates=()))
check(4,3,
plates=(5,),
mu=Gaussian(np.random.randn(1,3),
random.covariance(3),
plates=(1,)))
#
# Test Lambda
#
# Simple
check(4,3,
Lambda=Wishart(10+np.random.rand(),
random.covariance(3)))
# Plates
check(4,3,
Lambda=Wishart(10+np.random.rand(),
random.covariance(3),
plates=(5,6)))
# Plates with moments broadcasted over plates
check(4,3,
Lambda=Wishart(10+np.random.rand(),
random.covariance(3),
plates=(5,)))
check(4,3,
Lambda=Wishart(10+np.random.rand(1),
random.covariance(3),
plates=(5,)))
# Plates broadcasting
check(4,3,
plates=(5,),
Lambda=Wishart(10+np.random.rand(),
random.covariance(3),
plates=()))
check(4,3,
plates=(5,),
Lambda=Wishart(10+np.random.rand(),
random.covariance(3),
plates=(1,)))
#
# Test A
#
# Simple
check(4,3,
A=GaussianARD(np.random.randn(3,3),
np.random.rand(3,3),
shape=(3,),
plates=(3,)))
# Plates on time axis
check(5,3,
A=GaussianARD(np.random.randn(4,3,3),
np.random.rand(4,3,3),
shape=(3,),
plates=(4,3)))
# Plates on time axis with broadcasted moments
check(5,3,
A=GaussianARD(np.random.randn(1,3,3),
np.random.rand(1,3,3),
shape=(3,),
plates=(4,3)))
check(5,3,
A=GaussianARD(np.random.randn(3,3),
np.random.rand(3,3),
shape=(3,),
plates=(4,3)))
# Plates
check(4,3,
A=GaussianARD(np.random.randn(5,6,1,3,3),
np.random.rand(5,6,1,3,3),
shape=(3,),
plates=(5,6,1,3)))
# Plates with moments broadcasted over plates
check(4,3,
A=GaussianARD(np.random.randn(3,3),
np.random.rand(3,3),
shape=(3,),
plates=(5,1,3)))
check(4,3,
A=GaussianARD(np.random.randn(1,1,3,3),
np.random.rand(1,1,3,3),
shape=(3,),
plates=(5,1,3)))
# Plates broadcasting
check(4,3,
plates=(5,),
A=GaussianARD(np.random.randn(3,3),
np.random.rand(3,3),
shape=(3,),
plates=(3,)))
check(4,3,
plates=(5,),
A=GaussianARD(np.random.randn(3,3),
np.random.rand(3,3),
shape=(3,),
plates=(1,1,3)))
#
# Test v
#
# Simple
check(4,3,
V=Gamma(np.random.rand(1,3),
np.random.rand(1,3),
plates=(1,3)))
check(4,3,
V=Gamma(np.random.rand(3),
np.random.rand(3),
plates=(3,)))
# Plates
check(4,3,
V=Gamma(np.random.rand(5,6,1,3),
np.random.rand(5,6,1,3),
plates=(5,6,1,3)))
# Plates with moments broadcasted over plates
check(4,3,
V=Gamma(np.random.rand(1,3),
np.random.rand(1,3),
plates=(5,1,3)))
check(4,3,
V=Gamma(np.random.rand(1,1,3),
np.random.rand(1,1,3),
plates=(5,1,3)))
# Plates broadcasting
check(4,3,
plates=(5,),
V=Gamma(np.random.rand(1,3),
np.random.rand(1,3),
plates=(1,3)))
check(4,3,
plates=(5,),
V=Gamma(np.random.rand(1,1,3),
np.random.rand(1,1,3),
plates=(1,1,3)))
#
# Check with input signals
#
mu = 2
Lambda = 3
A = 4
B = 5
v = 6
inputs = [[-2], [3]]
X = GaussianMarkovChain([mu], [[Lambda]], [[A,B]], [v], inputs=inputs)
V = (np.array([[v*A**2, -v*A, 0],
[-v*A, v*A**2, -v*A],
[0, -v*A, 0]]) +
np.array([[Lambda, 0, 0],
[0, v, 0],
[0, 0, v]]))
m = (np.array([Lambda*mu, 0, 0]) +
np.array([0, v*B*inputs[0][0], v*B*inputs[1][0]]) -
np.array([v*A*B*inputs[0][0], v*A*B*inputs[1][0], 0]))
Cov = np.linalg.inv(V)
mean = np.dot(Cov, m)
X.update()
u = X.get_moments()
self.assertAllClose(u[0], mean[:,None])
self.assertAllClose(u[1] - u[0][...,None,:]*u[0][...,:,None],
Cov[(0,1,2),(0,1,2),None,None])
self.assertAllClose(u[2] - u[0][...,:-1,:,None]*u[0][...,1:,None,:],
Cov[(0,1),(1,2),None,None])
pass
def test_smoothing(self):
"""
Test the posterior estimation of GaussianMarkovChain.
Create time-variant dynamics and compare the results of BayesPy VB
inference and standard Kalman filtering & smoothing.
This is not that useful anymore, because the moments are checked much
better in another test method.
"""
#
# Set up an artificial system
#
# Dimensions
N = 500
D = 2
# Dynamics (time varying)
A0 = np.array([[.9, -.4], [.4, .9]])
A1 = np.array([[.98, -.1], [.1, .98]])
l = np.linspace(0, 1, N-1).reshape((-1,1,1))
A = (1-l)*A0 + l*A1
# Innovation covariance matrix (time varying)
v = np.random.rand(D)
V = np.diag(v)
# Observation noise covariance matrix
C = np.identity(D)
#
# Simulate data
#
X = np.empty((N,D))
Y = np.empty((N,D))
x = np.array([0.5, -0.5])
X[0,:] = x
Y[0,:] = x + np.random.multivariate_normal(np.zeros(D), C)
for n in range(N-1):
x = np.dot(A[n,:,:],x) + np.random.multivariate_normal(np.zeros(D), V)
X[n+1,:] = x
Y[n+1,:] = x + np.random.multivariate_normal(np.zeros(D), C)
#
# BayesPy inference
#
# Construct VB model
Xh = GaussianMarkovChain(np.zeros(D), np.identity(D), A, 1/v, n=N)
Yh = Gaussian(Xh, np.identity(D), plates=(N,))
# Put data
Yh.observe(Y)
# Run inference
Xh.update()
# Store results
Xh_vb = Xh.u[0]
CovXh_vb = Xh.u[1] - Xh_vb[...,np.newaxis,:] * Xh_vb[...,:,np.newaxis]
#
# "The ground truth" using standard Kalman filter and RTS smoother
#
V = N*(V,)
UY = Y
U = N*(C,)
(Xh, CovXh) = kalman_filter(UY, U, A, V, np.zeros(D), np.identity(D))
(Xh, CovXh) = rts_smoother(Xh, CovXh, A, V)
#
# Check results
#
self.assertTrue(np.allclose(Xh_vb, Xh))
self.assertTrue(np.allclose(CovXh_vb, CovXh))
class TestVaryingGaussianMarkovChain(TestCase):
def test_plates_from_parents(self):
"""
Test that VaryingGaussianMarkovChain deduces plates correctly
"""
def check(plates_X,
plates_mu=(),
plates_Lambda=(),
plates_B=(),
plates_S=(),
plates_v=()):
D = 3
K = 2
N = 4
np.random.seed(42)
mu = Gaussian(np.random.randn(*(plates_mu+(D,))),
random.covariance(D))
Lambda = Wishart(D+np.ones(plates_Lambda),
random.covariance(D))
B = GaussianARD(np.random.randn(*(plates_B+(D,D,K))),
1+np.random.rand(*(plates_B+(D,D,K))),
shape=(D,K),
plates=plates_B+(D,))
S = GaussianARD(np.random.randn(*(plates_S+(N,K))),
1+np.random.rand(*(plates_S+(N,K))),
shape=(K,),
plates=plates_S+(N,))
v = Gamma(1+np.random.rand(*(plates_v+(1,D))),
1+np.random.rand(*(plates_v+(1,D))))
X = VaryingGaussianMarkovChain(mu, Lambda, B, S, v, name="X")
self.assertEqual(plates_X, X.plates,
msg="Incorrect plates deduced")
pass
check(())
check((2,3),
plates_mu=(2,3))
check((6,7),
plates_Lambda=(6,7))
check((2,3),
plates_B=(2,3))
check((2,3),
plates_S=(2,3))
check((2,3),
plates_v=(2,3))
pass
def test_message_to_child(self):
# A very simple check before the more complex ones:
# 1-D process, k=1, fixed constant parameters
m = 1.0
l = 4.0
b = 2.0
s = [3.0, 8.0]
v = 5.0
X = VaryingGaussianMarkovChain([m],
[[l]],
[[[b]]],
[[s[0]],[s[1]]],
[v])
(u0, u1, u2) = X._message_to_child()
C = np.array([[l+b**2*s[0]**2*v, -b*s[0]*v, 0],
[ -b*s[0]*v, v+b**2*s[1]**2*v, -b*s[1]*v],
[ 0, -b*s[1]*v, v]])
Cov = np.linalg.inv(C)
m0 = np.dot(Cov, [[l*m], [0], [0]])
m1 = np.diag(Cov)[:,None,None] + m0[:,:,None]**2
m2 = np.diag(Cov, k=1)[:,None,None] + m0[1:,:,None]*m0[:-1,:,None]
self.assertAllClose(m0, u0)
self.assertAllClose(m1, u1)
self.assertAllClose(m2, u2)
def check(N, D, K, plates=None, mu=None, Lambda=None, B=None, S=None, V=None):
if mu is None:
mu = np.random.randn(D)
if Lambda is None:
Lambda = random.covariance(D)
if B is None:
B = np.random.randn(D,D,K)
if S is None:
                S = np.random.randn(N-1,K)
#-*- coding: utf-8 -*-
from __future__ import (print_function, division,
absolute_import, unicode_literals)
from cellularautomata2d import CellAutomata2D
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import colors as colors
from matplotlib import animation
class Sandpiles(CellAutomata2D):
def __init__(self, xlen, ylen, pbc=False, maxheight=4, cmap="Reds"):
CellAutomata2D.__init__(self, xlen, ylen, pbc=pbc, dtype=int)
self.maxheight = int(maxheight)
# Create auxiliar lattices
# Stores the modifications in the last step
self._auxlatt = np.zeros((self._ylen_bc, self._xlen_bc),
dtype=self.dtype)
# Stores the cells bigger than maxheight in the last step
self._collapse = np.zeros((self._ylen_bc, self._xlen_bc),
dtype=self.dtype)
# Create colormap for plots
self.vmincolor = 0 - 0.5
self.vmaxcolor = 2*maxheight + 0.5
        # Bin edges at -0.5, 0.5, ..., 2*maxheight + 0.5 (one bin per height value)
        bounds = np.arange(0, 2*maxheight + 2) - 0.5
self.cnorm = colors.BoundaryNorm(bounds, 256)
self.cmap = plt.cm.get_cmap(cmap, 9)
def randomfill(self, minval=0, maxval=None):
"""Fill the lattice randomly with values in the given range.
"""
        if maxval is None:
maxval = self.maxheight
for idx, height in np.ndenumerate(self.latt):
self.latt[idx] = np.random.randint(minval, maxval)
return
def randomfill_mass(self, mass):
"""Fill the lattice randomly up to a given total mass.
"""
for i in range(mass):
            flat_idx = np.random.randint(0, self.size)  # high is exclusive, so every cell can be chosen
self.latt.flat[flat_idx] += 1
return
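    # Illustrative usage sketch (not part of the original module): drop a fixed
    # amount of mass on a lattice and relax it by repeatedly calling
    # _evolvestep(), which (per its docstring) returns False once nothing moves.
    #
    #     pile = Sandpiles(50, 50, pbc=True)
    #     pile.randomfill_mass(2000)
    #     while pile._evolvestep():
    #         pass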
# def mass(self):
# """Return the value of the total mass of the system.
#
# """
# lattmass = self.latt.sum()
# return lattmass
def _evolvestep(self):
"""Evolve the system one step.
Returns
-------
is_active : bool
True if the lattice have moved and False otherwise.
"""
self._auxlatt.fill(0)
self._collapse[self._latt_idx] = (self.latt
> self.maxheight).astype(int)
self._auxlatt -= self.maxheight*self._collapse
self._auxlatt += np.roll(self._collapse, 1, axis=0)
self._auxlatt += np.roll(self._collapse, -1, axis=0)
        self._auxlatt += np.roll(self._collapse, 1, axis=1)
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
import datetime as dt
from tinymce.models import HTMLField
from django.dispatch import receiver
from django.db.models.signals import post_save
from django.urls import reverse
from django.template.defaultfilters import slugify
from django.conf import settings
from django.db.models import Avg, Max, Min
import numpy as np
def post_save_user_model(sender,instance,created,*args,**kwargs):
if created:
try:
Profile.objects.create(user = instance)
except:
pass
post_save.connect(post_save_user_model,sender=settings.AUTH_USER_MODEL)
class Profile(models.Model):
profile_image = models.ImageField(blank=True,upload_to='profiles/')
bio = models.TextField()
user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='profile')
def save_profile(self):
self.save()
@classmethod
def get_by_id(cls, id):
profile = Profile.objects.get(user = id)
return profile
    @classmethod
    def filter_by_id(cls, id):
profile = Profile.objects.filter(user = id).first()
return profile
def get_absolute_url(self):
return reverse('user_profile')
def __str__(self):
        return self.user.username
class Project(models.Model):
"""
This is the class we will use to create images
"""
image_url = models.ImageField(upload_to = "images/")
title = models.CharField(max_length = 30)
description = HTMLField()
poster = models.ForeignKey(User,related_name='images')
date = models.DateTimeField(auto_now_add = True,null = True)
url = models.URLField(max_length = 100)
def average_design(self):
design_ratings = list(map(lambda x: x.design_rating, self.reviews.all()))
return np.mean(design_ratings)
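    # Illustrative usage sketch (assumes a related Review model exposing
    # design_rating/usability_rating fields through the `reviews` relation,
    # as implied by the accessors in this class):
    #
    #     project = Project.objects.first()
    #     print(project.average_design(), project.average_usability())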
def average_usability(self):
usability_ratings = list(map(lambda x: x.usability_rating, self.reviews.all()))
        return np.mean(usability_ratings)
from __future__ import division
import copy
import numpy as np
from ellipse.geometry import Geometry, DEFAULT_EPS, DEFAULT_STEP, PHI_MIN
from ellipse.integrator import integrators, BI_LINEAR
DEFAULT_SCLIP = 3.
class Sample(object):
def __init__(self, image, sma, x0=None, y0=None, astep=DEFAULT_STEP, eps=DEFAULT_EPS, position_angle=0.0,
sclip=DEFAULT_SCLIP, nclip=0, linear_growth=False, integrmode=BI_LINEAR, geometry=None):
'''
A Sample instance describes an elliptical path over the image, over which
intensities can be extracted using a selection of integration algorithms.
The Sample instance contains a 'geometry' attribute that describes its geometry.
Parameters
----------
:param image: numpy 2-d array
pixels
:param sma: float
the semi-major axis length in pixels
:param x0: float, default=None
the X coordinate of the ellipse center
:param y0: foat, default=None
the Y coordinate of the ellipse center
:param astep: float, default=0.1
step value for growing/shrinking the semi-
major axis. It can be expressed either in
pixels (when 'linear_growth'=True) or in
relative value (when 'linear_growth=False')
        :param eps: float, default=0.2
            ellipticity
        :param position_angle: float, default=0.0
position angle of ellipse in relation to the
+X axis of the image array (rotating towards
the +Y axis).
:param sclip: float, default=3.0
sigma-clip value
:param nclip: int, default=0
number of sigma-clip interations. If 0, skip sigma-clipping.
:param linear_growth: boolean, default=False
semi-major axis growing/shrinking mode
:param integrmode: string, default=BI_LINEAR
area integration mode, as defined in module integrator.py
:param geometry: Geometry instance, default=None
the geometry that describes the ellipse. This can be used in
lieu of the explicit specification of parameters 'sma', 'x0',
'y0', 'eps', etc. In any case, the Geometry instance
becomes an attribute of the Sample object.
Attributes
----------
:param values: 2-d numpy array
sampled values as a 2-d numpy array
with the following structure:
values[0] = 1-d array with angles
values[1] = 1-d array with radii
values[2] = 1-d array with intensity
:param mean: float
the mean intensity along the elliptical path
:param gradient: float
the local radial intensity gradient
:param gradient_error: float
the error associated with the local radial intensity gradient
:param gradient_relative_error: float
the relative error associated with the local radial intensity gradient
:param sector_area: float
the average area of the sectors along the
elliptical path where the sample values
were integrated from.
:param total_points: int
the total number of sample values that would
cover the entire elliptical path
:param actual_points: int
the actual number of sample values that were
taken from the image. It can be smaller than
total_points when the ellipse encompasses
            regions outside the image, or when sigma-clipping
removed some of the points.
'''
self.image = image
self.integrmode = integrmode
if geometry:
# when the geometry is inherited from somewhere else,
# its 'sma' attribute must be replaced by the value
# explicitly passed to the constructor.
self.geometry = copy.deepcopy(geometry)
self.geometry.sma = sma
else:
# if no center was specified, assume it's roughly
# coincident with the image center
_x0 = x0
_y0 = y0
if not _x0 or not _y0:
_x0 = image.shape[0] / 2
_y0 = image.shape[1] / 2
self.geometry = Geometry(_x0, _y0, sma, eps, position_angle, astep, linear_growth)
# sigma-clip parameters
self.sclip = sclip
self.nclip = nclip
# extracted values associated with this sample.
self.values = None
self.mean = None
self.gradient = None
self.gradient_error = None
self.gradient_relative_error = None
self.sector_area = None
# total_points reports the total number of pairs angle-radius that
# were attempted. actual_points reports the actual number of sampled
# pairs angle-radius that resulted in valid values.
self.total_points = 0
self.actual_points = 0
def extract(self):
''' Build sample by scanning elliptical path over image array
:return: numpy 2-d array
contains three elements. Each element is a 1-d
array containing respectively angles, radii, and
extracted intensity values.
'''
# the sample values themselves are kept cached to prevent
# multiple calls to the integrator code.
if self.values is not None:
return self.values
else:
s = self._extract()
self.values = s
return s
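    # Illustrative usage sketch (assumes `image` is a 2-d numpy array of pixel
    # values; the semi-major axis and ellipticity below are arbitrary):
    #
    #     sample = Sample(image, 40.0, eps=0.3)
    #     angles, radii, intensities = sample.extract()
    #     sample.update()
    #     print(sample.mean, sample.gradient)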
def _extract(self):
# Here the actual sampling takes place. This is called only once
# during the life of a Sample instance, because it's an expensive
# calculation. This method should not be called from external code.
# If one wants to force it to re-run, then do:
#
# sample.values = None
#
# before calling sample.extract()
# individual extracted sample points will be stored in here
angles = []
radii = []
intensities = []
sector_areas = []
# reset counters
self.total_points = 0
self.actual_points = 0
# build integrator
integrator = integrators[self.integrmode](self.image, self.geometry, angles, radii, intensities)
# initialize walk along elliptical path
radius = self.geometry.initial_polar_radius
phi = self.geometry.initial_polar_angle
# In case of an area integrator, ask the integrator to deliver a hint of how much
# area the sectors will have. In case of too small areas, tests showed that the
# area integrators (mean, median) won't perform properly. In that case, we override
# the caller's selection and use the bi-linear integrator regardless.
if integrator.is_area():
integrator.integrate(radius, phi)
area = integrator.get_sector_area()
# this integration that just took place messes up with the storage
# arrays and the constructors. We have to build a new integrator
# instance from scratch, even if it is the same kind as originally
# selected by the caller.
angles = []
radii = []
intensities = []
if area < 1.0:
integrator = integrators[BI_LINEAR](self.image, self.geometry, angles, radii, intensities)
else:
integrator = integrators[self.integrmode](self.image, self.geometry, angles, radii, intensities)
# walk along elliptical path, integrating at specified
# places defined by polar vector. Need to go a bit beyond
# full circle to ensure full coverage.
while (phi <= np.pi*2.+PHI_MIN):
# do the integration at phi-radius position, and append
# results to the angles, radii, and intensities lists.
integrator.integrate(radius, phi)
# store sector area locally
sector_areas.append(integrator.get_sector_area())
# update total number of points
self.total_points += 1
# update angle and radius to be used to define
# next polar vector along the elliptical path
phistep_ = integrator.get_polar_angle_step()
phi += min(phistep_, 0.5)
radius = self.geometry.radius(phi)
# average sector area is calculated after the integrator had
# the opportunity to step over the entire elliptical path.
self.sector_area = np.mean(np.array(sector_areas))
# apply sigma-clipping.
angles, radii, intensities = self._sigma_clip(angles, radii, intensities)
# actual number of sampled points, after sigma-clip removed outliers.
self.actual_points = len(angles)
# pack results in 2-d array
result = np.array([np.array(angles), np.array(radii), np.array(intensities)])
return result
def _sigma_clip(self, angles, radii, intensities):
if self.nclip > 0:
for iter in range(self.nclip):
angles, radii, intensities = self._iter_sigma_clip(angles.copy(), radii.copy(), intensities.copy())
return np.array(angles), np.array(radii), np.array(intensities)
def _iter_sigma_clip(self, angles, radii, intensities):
# Can't use scipy or astropy tools because they use masked arrays.
# Also, they operate on a single array, and we need to operate on
# three arrays simultaneously. We need something that physically
# removes the clipped points from the arrays, since that is what
# the remaining of the 'ellipse' code expects.
r_angles = []
r_radii = []
r_intensities = []
values = np.array(intensities)
mean = np.mean(values)
sig = np.std(values)
lower = mean - self.sclip * sig
upper = mean + self.sclip * sig
count = 0
for k in range(len(intensities)):
if intensities[k] >= lower and intensities[k] < upper:
r_angles.append(angles[k])
r_radii.append(radii[k])
r_intensities.append(intensities[k])
count += 1
return r_angles, r_radii, r_intensities
def update(self):
''' Update this Sample instance with the mean intensity and
local gradient values.
'''
step = self.geometry.astep
# Update the mean value first, using extraction from main sample.
s = self.extract()
self.mean = np.mean(s[2])
# Get sample with same geometry but at a different distance from
# center. Estimate gradient from there.
gradient, gradient_error = self._get_gradient(step)
# Check for meaningful gradient. If no meaningful gradient, try
# another sample, this time using larger radius. Meaningful
# gradient means something shallower, but still close to within
# a factor 3 from previous gradient estimate. If no previous
# estimate is available, guess it.
previous_gradient = self.gradient
if not previous_gradient:
previous_gradient = -0.05 # good enough, based on usage
if gradient >= (previous_gradient / 3.): # gradient is negative!
gradient, gradient_error = self._get_gradient(2 * step)
# If still no meaningful gradient can be measured, try with previous
# one, slightly shallower. A factor 0.8 is not too far from what is
# expected from geometrical sampling steps of 10-20% and a
# deVaucouleurs law or an exponential disk (at least at its inner parts,
# r <~ 5 req). Gradient error is meaningless in this case.
if gradient >= (previous_gradient / 3.):
gradient = previous_gradient * 0.8
gradient_error = None
self.gradient = gradient
self.gradient_error = gradient_error
if gradient_error:
self.gradient_relative_error = gradient_error / np.abs(gradient)
else:
self.gradient_relative_error = None
def _get_gradient(self, step):
gradient_sma = (1. + step) * self.geometry.sma
gradient_sample = Sample(self.image, gradient_sma,
x0=self.geometry.x0, y0=self.geometry.y0,
astep=self.geometry.astep,
sclip=self.sclip,
nclip=self.nclip,
eps=self.geometry.eps, position_angle=self.geometry.pa,
linear_growth=self.geometry.linear_growth,
integrmode=self.integrmode)
sg = gradient_sample.extract()
mean_g = np.mean(sg[2])
gradient = (mean_g - self.mean) / self.geometry.sma / step
s = self.extract()
sigma = np.std(s[2])
sigma_g = np.std(sg[2])
gradient_error = np.sqrt(sigma**2 / len(s[2]) + sigma_g**2 / len(sg[2])) / self.geometry.sma / step
return gradient, gradient_error
def coordinates(self):
'''
Returns the X-Y coordinates associated with each sampled point.
:return: 1-D numpy arrays
two arrays with the X and Y coordinates, respectively
'''
angles = self.values[0]
radii = self.values[1]
x = np.zeros(len(angles))
y = np.zeros(len(angles))
for i in range(len(x)):
x[i] = radii[i] * np.cos(angles[i] + self.geometry.pa) + self.geometry.x0
            y[i] = radii[i] * np.sin(angles[i] + self.geometry.pa) + self.geometry.y0
        return x, y
# -*- coding: utf-8 -*-
"""Orientation models."""
import numpy as np
from .closures import compute_closure
def jeffery_ode(a, t, xi, L, closure="IBOF", **kwargs):
"""ODE describing Jeffery's model.
Parameters
----------
a : 9x1 numpy array
Flattened fiber orientation tensor.
t : float
Time of evaluation.
xi : float
Shape factor computed from aspect ratio.
L : function handle
Function to compute velocity gradient at time t.
closure: str
Name of closure to be used.
Returns
-------
9x1 numpy array
Orientation tensor rate.
References
----------
.. [1] <NAME>
'The motion of ellipsoidal particles immersed in a viscous fluid',
Proceedings of the Royal Society A, 1922.
https://doi.org/10.1098/rspa.1922.0078
"""
a = np.reshape(a, (3, 3))
A = compute_closure(a, closure)
D = 0.5 * (L(t) + np.transpose(L(t)))
W = 0.5 * (L(t) - np.transpose(L(t)))
dadt = (
np.einsum("ik,kj->ij", W, a)
- np.einsum("ik,kj->ij", a, W)
+ xi
* (
np.einsum("ik,kj->ij", D, a)
+ np.einsum("ik,kj->ij", a, D)
- 2 * np.einsum("ijkl,kl->ij", A, D)
)
)
return dadt.ravel()
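# Illustrative integration sketch (not part of the original module): the ODE
# signature matches scipy.integrate.odeint, so a simple-shear run could look
# like the following. The aspect ratio and the common shape-factor formula
# xi = (ar**2 - 1) / (ar**2 + 1) are assumptions made for this example only.
#
#     from scipy.integrate import odeint
#     ar = 25.0                                  # fiber aspect ratio (assumed)
#     xi = (ar**2 - 1.0) / (ar**2 + 1.0)         # shape factor
#     L = lambda t: np.array([[0.0, 1.0, 0.0],
#                             [0.0, 0.0, 0.0],
#                             [0.0, 0.0, 0.0]])  # simple shear velocity gradient
#     a0 = (np.eye(3) / 3.0).ravel()             # isotropic initial orientation
#     t = np.linspace(0.0, 50.0, 500)
#     a_t = odeint(jeffery_ode, a0, t, args=(xi, L))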
def folgar_tucker_ode(a, t, xi, L, Ci=0.0, closure="IBOF", **kwargs):
"""ODE describing the Folgar-Tucker model.
Parameters
----------
a : 9x1 numpy array
Flattened fiber orientation tensor.
t : float
Time of evaluation.
xi : float
Shape factor computed from aspect ratio.
L : function handle
Function to compute velocity gradient at time t.
Ci : float
Fiber interaction constant (typically 0 < Ci < 0.1).
closure: str
Name of closure to be used.
Returns
-------
9x1 numpy array
Orientation tensor rate.
References
----------
.. [1] <NAME>, <NAME> III,
'Orientation behavior of fibers in concentrated suspensions',
Journal of Reinforced Plastic Composites 3, 98-119, 1984.
https://doi.org/10.1177%2F073168448400300201
"""
a = np.reshape(a, (3, 3))
A = compute_closure(a, closure)
D = 0.5 * (L(t) + np.transpose(L(t)))
W = 0.5 * (L(t) - np.transpose(L(t)))
G = np.sqrt(2.0 * np.einsum("ij,ij", D, D))
delta = np.eye(3)
dadt = (
np.einsum("ik,kj->ij", W, a)
- np.einsum("ik,kj->ij", a, W)
+ xi
* (
np.einsum("ik,kj->ij", D, a)
+ np.einsum("ik,kj->ij", a, D)
- 2 * np.einsum("ijkl,kl->ij", A, D)
)
+ 2 * Ci * G * (delta - 3 * a)
)
return dadt.ravel()
def maier_saupe_ode(a, t, xi, L, Ci=0.0, U0=0.0, closure="IBOF", **kwargs):
"""ODE using Folgar-Tucker constant and Maier-Saupe potential.
Parameters
----------
a : 9x1 numpy array
Flattened fiber orientation tensor.
t : float
Time of evaluation.
xi : float
Shape factor computed from aspect ratio.
L : function handle
Function to compute velocity gradient at time t.
Ci : float
Fiber interaction constant (typically 0 < Ci < 0.1).
U0 : float
Maier-Saupe Potential (in 3D stable for y U0 < 8 Ci).
closure: str
Name of closure to be used.
Returns
-------
9x1 numpy array
Orientation tensor rate.
References
----------
.. [1] <NAME>, <NAME>, <NAME>,
'Comparative numerical study of two concentrated fiber suspension models',
Journal of Non-Newtonian Fluid Mechanics 165, 764-781, 2010.
https://doi.org/10.1016/j.jnnfm.2010.04.001
"""
a = np.reshape(a, (3, 3))
A = compute_closure(a, closure)
D = 0.5 * (L(t) + np.transpose(L(t)))
W = 0.5 * (L(t) - np.transpose(L(t)))
G = np.sqrt(2.0 * np.einsum("ij,ij", D, D))
delta = np.eye(3)
dadt = (
np.einsum("ik,kj->ij", W, a)
- np.einsum("ik,kj->ij", a, W)
+ xi
* (
np.einsum("ik,kj->ij", D, a)
+ np.einsum("ik,kj->ij", a, D)
- 2 * np.einsum("ijkl,kl->ij", A, D)
)
+ 2
* G
* (
Ci * (delta - 3 * a)
+ U0
* (np.einsum("ik,kj->ij", a, a) - np.einsum("ijkl,kl->ij", A, a))
)
)
return dadt.ravel()
def iard_ode(a, t, xi, L, Ci=0.0, Cm=0.0, closure="IBOF", **kwargs):
"""ODE describing iARD model.
Parameters
----------
a : 9x1 numpy array
Flattened fiber orientation tensor.
t : float
Time of evaluation.
xi : float
Shape factor computed from aspect ratio.
L : function handle
Function to compute velocity gradient at time t.
Ci : float
Fiber interaction constant (typically 0 < Ci < 0.05).
Cm : float
Anisotropy factor (0 < Cm < 1).
closure: str
Name of closure to be used.
Returns
-------
9x1 numpy array
Orientation tensor rate.
References
----------
.. [1] <NAME>; <NAME>; <NAME>,
'An objective tensor to predict anisotropic fiber orientation in concentrated suspensions',
Journal of Rheology 60, 215, 2016.
https://doi.org/10.1122/1.4939098
"""
a = np.reshape(a, (3, 3))
A = compute_closure(a, closure)
D = 0.5 * (L(t) + np.transpose(L(t)))
W = 0.5 * (L(t) - np.transpose(L(t)))
G = np.sqrt(2.0 * np.einsum("ij,ij", D, D))
delta = np.eye(3)
D2 = np.einsum("ik,kj->ij", D, D)
D2_norm = np.sqrt(1.0 / 2.0 * np.einsum("ij,ij", D2, D2))
Dr = Ci * (delta - Cm * D2 / D2_norm)
dadt_HD = (
np.einsum("ik,kj->ij", W, a)
- np.einsum("ik,kj->ij", a, W)
+ xi
* (
np.einsum("ik,kj->ij", D, a)
+ np.einsum("ik,kj->ij", a, D)
- 2 * np.einsum("ijkl,kl->ij", A, D)
)
)
dadt_iard = G * (
2 * Dr
- 2 * np.trace(Dr) * a
- 5 * np.einsum("ik,kj->ij", Dr, a)
- 5 * np.einsum("ik,kj->ij", a, Dr)
        + 10 * np.einsum("ijkl,kl->ij", A, Dr)
    )
    dadt = dadt_HD + dadt_iard
    return dadt.ravel()
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
import unittest.mock as mk
from math import cos, sin
import numpy as np
import pytest
from numpy.testing import assert_allclose
import astropy.units as u
from astropy.modeling import models, rotations
from astropy.tests.helper import assert_quantity_allclose
from astropy.wcs import wcs
@pytest.mark.parametrize(('inp'), [(0, 0), (4000, -20.56), (-2001.5, 45.9),
(0, 90), (0, -90), (np.mgrid[:4, :6]),
([[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]]),
([[[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]],
[[13, 14, 15, 16],
[17, 18, 19, 20],
[21, 22, 23, 24]]],
[[[25, 26, 27, 28],
[29, 30, 31, 32],
[33, 34, 35, 36]],
[[37, 38, 39, 40],
[41, 42, 43, 44],
[45, 46, 47, 48]]])])
def test_against_wcslib(inp):
w = wcs.WCS()
crval = [202.4823228, 47.17511893]
w.wcs.crval = crval
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
lonpole = 180
tan = models.Pix2Sky_TAN()
n2c = models.RotateNative2Celestial(crval[0], crval[1], lonpole)
c2n = models.RotateCelestial2Native(crval[0], crval[1], lonpole)
m = tan | n2c
minv = c2n | tan.inverse
radec = w.wcs_pix2world(inp[0], inp[1], 1)
xy = w.wcs_world2pix(radec[0], radec[1], 1)
assert_allclose(m(*inp), radec, atol=1e-12)
assert_allclose(minv(*radec), xy, atol=1e-12)
@pytest.mark.parametrize(('inp'), [(1e-5, 1e-4), (40, -20.56), (21.5, 45.9),
([[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]]),
([[[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]],
[[13, 14, 15, 16],
[17, 18, 19, 20],
[21, 22, 23, 24]]],
[[[25, 26, 27, 28],
[29, 30, 31, 32],
[33, 34, 35, 36]],
[[37, 38, 39, 40],
[41, 42, 43, 44],
[45, 46, 47, 48]]])])
def test_roundtrip_sky_rotation(inp):
lon, lat, lon_pole = 42, 43, 44
n2c = models.RotateNative2Celestial(lon, lat, lon_pole)
c2n = models.RotateCelestial2Native(lon, lat, lon_pole)
assert_allclose(n2c.inverse(*n2c(*inp)), inp, atol=1e-13)
assert_allclose(c2n.inverse(*c2n(*inp)), inp, atol=1e-13)
def test_native_celestial_lat90():
n2c = models.RotateNative2Celestial(1, 90, 0)
alpha, delta = n2c(1, 1)
assert_allclose(delta, 1)
assert_allclose(alpha, 182)
def test_Rotation2D():
model = models.Rotation2D(angle=90)
x, y = model(1, 0)
assert_allclose([x, y], [0, 1], atol=1e-10)
def test_Rotation2D_quantity():
model = models.Rotation2D(angle=90*u.deg)
x, y = model(1*u.deg, 0*u.arcsec)
assert_quantity_allclose([x, y], [0, 1]*u.deg, atol=1e-10*u.deg)
def test_Rotation2D_inverse():
model = models.Rotation2D(angle=234.23494)
x, y = model.inverse(*model(1, 0))
assert_allclose([x, y], [1, 0], atol=1e-10)
def test_Rotation2D_errors():
model = models.Rotation2D(angle=90*u.deg)
# Bad evaluation input shapes
x = np.array([1, 2])
    y = np.array([1, 2, 3])
import numpy as np
from PIL import Image, ImageChops
from matplotlib.animation import FFMpegWriter
import matplotlib.pyplot as plt
from pathlib import Path
BACKGROUND_COLOUR = "#000000FF"
FRAME_RATE = 60
static_street_frames = FRAME_RATE
fade_to_background_frames = 5 * FRAME_RATE
static_wire_frames = FRAME_RATE
fade_to_foreground_frames = 5 * FRAME_RATE
fade_to_background_indices = np.linspace(1, 2001, fade_to_background_frames, dtype=np.int64)
# PrufPlantAnalysis
#
# This class defines key analytical routines for the PRUF/WRA Benchmarking
# standard operational assessment.
#
# The PrufPlantAnalysis object is a factory which instantiates either the Pandas, Dask, or Spark
# implementation depending on what the user prefers.
#
# The resulting object is loaded as a plugin into each PlantData object.
import random
import numpy as np
import pandas as pd
import statsmodels.api as sm
from tqdm import tqdm
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import KFold
from operational_analysis.toolkits import met_data_processing as mt
from operational_analysis.toolkits import timeseries as tm
from operational_analysis.toolkits.machine_learning_setup import MachineLearningSetup
from operational_analysis.toolkits import unit_conversion as un
from operational_analysis.toolkits import filters
from operational_analysis.types import timeseries_table
from operational_analysis import logged_method_call
from operational_analysis import logging
logger = logging.getLogger(__name__)
class MonteCarloAEP(object):
"""
A serial (Pandas-driven) implementation of the benchmark PRUF operational
analysis implementation. This module collects standard processing and
analysis methods for estimating plant level operational AEP and uncertainty.
The preprocessing should run in this order:
1. Process revenue meter energy - creates monthly/daily data frame, gets revenue meter on monthly/daily basis, and adds
data flag
    2. Process loss estimates - add monthly/daily curtailment and availability losses to monthly/daily data frame
3. Process reanalysis data - add monthly/daily density-corrected wind speeds, temperature (if used) and wind direction (if used)
from several reanalysis products to the monthly data frame
4. Set up Monte Carlo - create the necessary Monte Carlo inputs to the OA process
5. Run AEP Monte Carlo - run the OA process iteratively to get distribution of AEP results
The end result is a distribution of AEP results which we use to assess expected AEP and associated uncertainty
"""
@logged_method_call
def __init__(self, plant, reanal_products=["merra2", "ncep2", "erai","era5"], uncertainty_meter=0.005, uncertainty_losses=0.05,
uncertainty_windiness=(10, 20), uncertainty_loss_max=(10, 20),
uncertainty_nan_energy=0.01, time_resolution = 'M', reg_model = 'lin', ml_setup_kwargs={},
reg_temperature = False, reg_winddirection = False):
"""
Initialize APE_MC analysis with data and parameters.
Args:
plant(:obj:`PlantData object`): PlantData object from which PlantAnalysis should draw data.
            reanal_products(:obj:`list`): List of reanalysis products to use for Monte Carlo sampling. Defaults to ["merra2", "ncep2", "erai", "era5"].
uncertainty_meter(:obj:`float`): uncertainty on revenue meter data
uncertainty_losses(:obj:`float`): uncertainty on long-term losses
uncertainty_windiness(:obj:`tuple`): number of years to use for the windiness correction
            uncertainty_loss_max(:obj:`tuple`): threshold for the combined availability and curtailment monthly loss
uncertainty_nan_energy(:obj:`float`): threshold to flag days/months based on NaNs
time_resolution(:obj:`string`): whether to perform the AEP calculation at monthly ('M') or daily ('D') time resolution
reg_model(:obj:`string`): which model to use for the regression ('lin' for linear, 'gam', 'gbm', 'etr'). At monthly time resolution only linear regression is allowed because of the reduced number of data points.
ml_setup_kwargs(:obj:`kwargs`): keyword arguments to MachineLearningSetup class
reg_temperature(:obj:`bool`): whether to include temperature (True) or not (False) as regression input
reg_winddirection(:obj:`bool`): whether to include wind direction (True) or not (False) as regression input
"""
logger.info("Initializing MonteCarloAEP Analysis Object")
self._aggregate = timeseries_table.TimeseriesTable.factory(plant._engine)
self._plant = plant # defined at runtime
self._reanal_products = reanal_products # set of reanalysis products to use
# Memo dictionaries help speed up computation
self.outlier_filtering = {} # Combinations of outlier filter results
self.long_term_sampling = {} # Combinations of long-term reanalysis data sampling
self.opt_model = {} # Optimized ML model hyperparameters for each reanalysis product
# Define relevant uncertainties, data ranges and max thresholds to be applied in Monte Carlo sampling
self.uncertainty_meter = np.float64(uncertainty_meter)
self.uncertainty_losses = np.float64(uncertainty_losses)
self.uncertainty_windiness = np.array(uncertainty_windiness, dtype=np.float64)
self.uncertainty_loss_max = np.array(uncertainty_loss_max, dtype=np.float64)
self.uncertainty_nan_energy = np.float64(uncertainty_nan_energy)
# Check that selected time resolution is allowed
if time_resolution not in ['M','D']:
raise ValueError("time_res has to either be M (monthly, default) or D (daily)")
self.time_resolution = time_resolution
self._resample_freq = {"M": 'MS', "D": 'D'}[self.time_resolution]
self._hours_in_res = {"M": 30*24, "D": 1*24}[self.time_resolution]
self._calendar_samples = {"M": 12, "D": 365.25}[self.time_resolution]
self.num_days_lt = (31, 28.25, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
# Check that choices for regression inputs are allowed
if reg_temperature not in [True, False]:
raise ValueError("reg_temperature has to either be True (if temperature is considered in the regression), or False (if temperature is omitted")
if reg_winddirection not in [True, False]:
raise ValueError("reg_winddirection has to either be True (if wind direction is considered in the regression), or False (if wind direction is omitted")
self.reg_winddirection = reg_winddirection
self.reg_temperature = reg_temperature
# Build list of regression variables
self._rean_vars = []
if self.reg_temperature:
self._rean_vars += ["temperature_K"]
if self.reg_winddirection:
self._rean_vars += ["u_ms", "v_ms"]
# Check that selected regression model is allowed
if reg_model not in ['lin', 'gbm','etr','gam']:
raise ValueError("reg_model has to either be lin (Linear regression, default), gbm (Gradient boosting model), etr (Extra trees regressor) or gam (Generalized additive model)")
self.reg_model = reg_model
self.ml_setup_kwargs = ml_setup_kwargs
# Monthly data can only use robust linear regression because of limited number of data
if (time_resolution == 'M') & (reg_model != 'lin'):
raise ValueError("For monthly time resolution, only linear regression is allowed!")
# Run preprocessing step
self.calculate_aggregate_dataframe()
# Store start and end of period of record
self._start_por = self._aggregate.df.index.min()
self._end_por = self._aggregate.df.index.max()
# Create a data frame to store monthly/daily reanalysis data over plant period of record
self._reanalysis_por = self._aggregate.df.loc[(self._aggregate.df.index >= self._start_por) & \
(self._aggregate.df.index <= self._end_por)]
@logged_method_call
def run(self, num_sim, reanal_subset=None):
"""
Perform pre-processing of data into an internal representation for which the analysis can run more quickly.
Args:
reanal_subset(:obj:`list`): list of str data indicating which reanalysis products to use in OA
num_sim(:obj:`int`): number of simulations to perform
Returns:
None
"""
self.num_sim = num_sim
if reanal_subset is None:
self.reanal_subset = self._reanal_products
else:
self.reanal_subset = reanal_subset
# Write parameters of run to the log file
logged_self_params = ["uncertainty_meter", "uncertainty_losses", "uncertainty_loss_max", "uncertainty_windiness",
"uncertainty_nan_energy", "num_sim", "reanal_subset"]
logged_params = {name: getattr(self, name) for name in logged_self_params}
logger.info("Running with parameters: {}".format(logged_params))
# Start the computation
self.calculate_long_term_losses()
self.setup_monte_carlo_inputs()
self.results = self.run_AEP_monte_carlo()
# Log the completion of the run
logger.info("Run completed")
def plot_reanalysis_normalized_rolling_monthly_windspeed(self):
"""
Make a plot of annual average wind speeds from reanalysis data to show general trends for each
Highlight the period of record for plant data
Returns:
matplotlib.pyplot object
"""
import matplotlib.pyplot as plt
project = self._plant
# Define parameters needed for plot
min_val = 1 # Default parameter providing y-axis minimum for shaded plant POR region
max_val = 1 # Default parameter providing y-axis maximum for shaded plant POR region
por_start = self._aggregate.df.index[0] # Start of plant POR
por_end = self._aggregate.df.index[-1] # End of plant POR
plt.figure(figsize=(14, 6))
for key in self._reanal_products:
rean_df = project._reanalysis._product[key].df # Set reanalysis product
ann_mo_ws = rean_df.resample('MS')['ws_dens_corr'].mean().to_frame() # Take monthly average wind speed
ann_roll = ann_mo_ws.rolling(12).mean() # Calculate rolling 12-month average
ann_roll_norm = ann_roll['ws_dens_corr'] / ann_roll[
'ws_dens_corr'].mean() # Normalize rolling 12-month average
# Update min_val and max_val depending on range of data
if ann_roll_norm.min() < min_val:
min_val = ann_roll_norm.min()
if ann_roll_norm.max() > max_val:
max_val = ann_roll_norm.max()
# Plot wind speed
plt.plot(ann_roll_norm, label=key)
# Plot dotted line at y=1 (i.e. average wind speed)
plt.plot((ann_roll.index[0], ann_roll.index[-1]), (1, 1), 'k--')
# Fill in plant POR region
plt.fill_between([por_start, por_end], [min_val, min_val], [max_val, max_val], alpha=0.1, label='Plant POR')
# Final touches to plot
plt.xlabel('Year')
plt.ylabel('Normalized wind speed')
plt.legend()
plt.tight_layout()
return plt
def plot_reanalysis_gross_energy_data(self, outlier_thres):
"""
Make a plot of normalized 30-day gross energy vs wind speed for each reanalysis product, include R2 measure
Args:
outlier_thres (float): outlier threshold (typical range of 1 to 4) which adjusts outlier sensitivity detection
Returns:
matplotlib.pyplot object
"""
import matplotlib.pyplot as plt
valid_aggregate = self._aggregate.df
plt.figure(figsize=(9, 9))
# Loop through each reanalysis product and make a scatterplot of monthly wind speed vs plant energy
for p in np.arange(0, len(list(self._reanal_products))):
col_name = self._reanal_products[p] # Reanalysis column in monthly data frame
x = sm.add_constant(valid_aggregate[col_name]) # Define 'x'-values (constant needed for regression function)
if self.time_resolution == 'M':
y = valid_aggregate['gross_energy_gwh'] * 30 / valid_aggregate[
'num_days_expected'] # Normalize energy data to 30-days
elif self.time_resolution == 'D':
y = valid_aggregate['gross_energy_gwh']
rlm = sm.RLM(y, x, M=sm.robust.norms.HuberT(
t=outlier_thres)) # Robust linear regression with HuberT algorithm (threshold equal to 2)
rlm_results = rlm.fit()
r2 = np.corrcoef(x.loc[rlm_results.weights == 1, col_name], y[rlm_results.weights == 1])[
0, 1] # Get R2 from valid data
# Plot results
plt.subplot(2, 2, p + 1)
plt.plot(x.loc[rlm_results.weights != 1, col_name], y[rlm_results.weights != 1], 'rx', label='Outlier')
plt.plot(x.loc[rlm_results.weights == 1, col_name], y[rlm_results.weights == 1], '.', label='Valid data')
plt.title(col_name + ', R2=' + str(np.round(r2, 3)))
plt.xlabel('Wind speed (m/s)')
if self.time_resolution == 'M':
plt.ylabel('30-day normalized gross energy (GWh)')
elif self.time_resolution == 'D':
plt.ylabel('Daily gross energy (GWh)')
plt.tight_layout()
return plt
def plot_result_aep_distributions(self):
"""
Plot a distribution of AEP values from the Monte-Carlo OA method
Returns:
matplotlib.pyplot object
"""
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(14, 12))
sim_results = self.results
ax = fig.add_subplot(2, 2, 1)
ax.hist(sim_results['aep_GWh'], 40, density=1)
ax.text(0.05, 0.9, 'AEP mean = ' + str(np.round(sim_results['aep_GWh'].mean(), 1)) + ' GWh/yr',
transform=ax.transAxes)
ax.text(0.05, 0.8, 'AEP unc = ' + str(
np.round(sim_results['aep_GWh'].std() / sim_results['aep_GWh'].mean() * 100, 1)) + "%",
transform=ax.transAxes)
plt.xlabel('AEP (GWh/yr)')
ax = fig.add_subplot(2, 2, 2)
ax.hist(sim_results['avail_pct'] * 100, 40, density=1)
ax.text(0.05, 0.9, 'Mean = ' + str(np.round((sim_results['avail_pct'].mean()) * 100, 1)) + ' %',
transform=ax.transAxes)
plt.xlabel('Availability Loss (%)')
ax = fig.add_subplot(2, 2, 3)
ax.hist(sim_results['curt_pct'] * 100, 40, density=1)
ax.text(0.05, 0.9, 'Mean: ' + str(np.round((sim_results['curt_pct'].mean()) * 100, 2)) + ' %',
transform=ax.transAxes)
plt.xlabel('Curtailment Loss (%)')
plt.tight_layout()
return plt
def plot_aep_boxplot(self, param, lab):
"""
Plot box plots of AEP results sliced by a specified Monte Carlo parameter
Args:
param(:obj:`list`): The Monte Carlo parameter on which to split the AEP results
lab(:obj:`str`): The name to use for the parameter when producing the figure
Returns:
(none)
"""
import matplotlib.pyplot as plt
sim_results = self.results
tmp_df=pd.DataFrame(data={'aep': sim_results.aep_GWh, 'param': param})
tmp_df.boxplot(column='aep',by='param',figsize=(8,6))
plt.ylabel('AEP (GWh/yr)')
plt.xlabel(lab)
plt.title('AEP estimates by %s' % lab)
plt.suptitle("")
plt.tight_layout()
return plt
def plot_aggregate_plant_data_timeseries(self):
"""
Plot timeseries of monthly/daily gross energy, availability and curtailment
Returns:
matplotlib.pyplot object
"""
import matplotlib.pyplot as plt
valid_aggregate = self._aggregate.df
plt.figure(figsize=(12, 9))
# Gross energy
plt.subplot(2, 1, 1)
plt.plot(valid_aggregate.gross_energy_gwh, '.-')
plt.grid('on')
plt.xlabel('Year')
plt.ylabel('Gross energy (GWh)')
# Availability and curtailment
plt.subplot(2, 1, 2)
plt.plot(valid_aggregate.availability_pct * 100, '.-', label='Availability')
plt.plot(valid_aggregate.curtailment_pct * 100, '.-', label='Curtailment')
plt.grid('on')
plt.xlabel('Year')
plt.ylabel('Loss (%)')
plt.legend()
plt.tight_layout()
return plt
@logged_method_call
def groupby_time_res(self, df):
"""
Group pandas dataframe based on the time resolution chosen in the calculation.
Args:
df(:obj:`dataframe`): dataframe that needs to be grouped based on time resolution used
Returns:
None
"""
if self.time_resolution == 'M':
df_grouped = df.groupby(df.index.month).mean()
elif self.time_resolution == 'D':
df_grouped = df.groupby([(df.index.month),(df.index.day)]).mean()
return df_grouped
@logged_method_call
def calculate_aggregate_dataframe(self):
"""
Perform pre-processing of the plant data to produce a monthly/daily data frame to be used in AEP analysis.
Args:
(None)
Returns:
(None)
"""
# Average to monthly/daily, quantify NaN data
self.process_revenue_meter_energy()
# Average to monthly/daily, quantify NaN data, merge with revenue meter energy data
self.process_loss_estimates()
# Density correct wind speeds, process temperature and wind direction, average to monthly/daily
self.process_reanalysis_data()
# Remove first and last reporting months if only partial month reported
# (only for monthly time resolution calculations)
if self.time_resolution == 'M':
self.trim_monthly_df()
# Drop any data that have NaN gross energy values or NaN reanalysis data
self._aggregate.df = self._aggregate.df.dropna(subset=['gross_energy_gwh']
+[product for product in self._reanal_products])
@logged_method_call
def process_revenue_meter_energy(self):
"""
Initial creation of monthly data frame:
1. Populate monthly/daily data frame with energy data summed from 10-min QC'd data
2. For each monthly/daily value, find percentage of NaN data used in creating it and flag if percentage is
greater than 0
Args:
(None)
Returns:
(None)
"""
df = getattr(self._plant, 'meter').df # Get the meter data frame
# Create the monthly/daily data frame by summing meter energy
self._aggregate.df = (df.resample(self._resample_freq)['energy_kwh'].sum() / 1e6).to_frame() # Get monthly energy values in GWh
        self._aggregate.df.rename(columns={"energy_kwh": "energy_gwh"}, inplace=True) # Rename kWh to GWh
        # Determine how much 10-min data was missing for each year-month/daily energy value. Flag accordingly if any is missing
self._aggregate.df['energy_nan_perc'] = df.resample(self._resample_freq)['energy_kwh'].apply(
tm.percent_nan) # Get percentage of meter data that were NaN when summing to monthly/daily
if self.time_resolution == 'M':
# Create a column with expected number of days per month (to be used when normalizing to 30-days for regression)
days_per_month = (pd.Series(self._aggregate.df.index)).dt.daysinmonth
days_per_month.index = self._aggregate.df.index
self._aggregate.df['num_days_expected'] = days_per_month
# Get actual number of days per month in the raw data
# (used when trimming beginning and end of monthly data frame)
# If meter data has higher resolution than monthly
if (self._plant._meter_freq == '1MS') | (self._plant._meter_freq == '1M'):
self._aggregate.df['num_days_actual'] = self._aggregate.df['num_days_expected']
else:
self._aggregate.df['num_days_actual'] = df.resample('MS')['energy_kwh'].apply(tm.num_days)
@logged_method_call
def process_loss_estimates(self):
"""
Append availability and curtailment losses to monthly data frame
Args:
(None)
Returns:
(None)
"""
df = getattr(self._plant, 'curtail').df
curt_aggregate = np.divide(df.resample(self._resample_freq)[['availability_kwh', 'curtailment_kwh']].sum(),
1e6) # Get sum of avail and curt losses in GWh
curt_aggregate.rename(columns={'availability_kwh': 'availability_gwh', 'curtailment_kwh': 'curtailment_gwh'},
inplace=True)
# Merge with revenue meter monthly/daily data
self._aggregate.df = self._aggregate.df.join(curt_aggregate)
# Add gross energy field
self._aggregate.df['gross_energy_gwh'] = un.compute_gross_energy(self._aggregate.df['energy_gwh'],
self._aggregate.df['availability_gwh'],
self._aggregate.df['curtailment_gwh'], 'energy',
'energy')
# Calculate percentage-based losses
self._aggregate.df['availability_pct'] = np.divide(self._aggregate.df['availability_gwh'],
self._aggregate.df['gross_energy_gwh'])
self._aggregate.df['curtailment_pct'] = np.divide(self._aggregate.df['curtailment_gwh'],
self._aggregate.df['gross_energy_gwh'])
self._aggregate.df['avail_nan_perc'] = df.resample(self._resample_freq)['availability_kwh'].apply(
tm.percent_nan) # Get percentage of 10-min meter data that were NaN when summing to monthly/daily
self._aggregate.df['curt_nan_perc'] = df.resample(self._resample_freq)['curtailment_kwh'].apply(
tm.percent_nan) # Get percentage of 10-min meter data that were NaN when summing to monthly/daily
self._aggregate.df['nan_flag'] = False # Set flag to false by default
self._aggregate.df.loc[(self._aggregate.df['energy_nan_perc'] > self.uncertainty_nan_energy) |
(self._aggregate.df['avail_nan_perc'] > self.uncertainty_nan_energy) |
(self._aggregate.df['curt_nan_perc'] > self.uncertainty_nan_energy), 'nan_flag'] \
= True # If more than 1% of data are NaN, set flag to True
        # By default, assume all reported losses are representative of long-term operational losses
self._aggregate.df['availability_typical'] = True
self._aggregate.df['curtailment_typical'] = True
# By default, assume combined availability and curtailment losses are below the threshold to be considered valid
self._aggregate.df['combined_loss_valid'] = True
@logged_method_call
def process_reanalysis_data(self):
"""
Process reanalysis data for use in PRUF plant analysis:
- calculate density-corrected wind speed and wind components
- get monthly/daily average wind speeds and components
- calculate monthly/daily average wind direction
- calculate monthly/daily average temperature
- append monthly/daily averages to monthly/daily energy data frame
Args:
(None)
Returns:
(None)
"""
# Define empty data frame that spans past our period of interest
self._reanalysis_aggregate = pd.DataFrame(index=pd.date_range(start='1997-01-01', end='2019-12-31',
freq=self._resample_freq), dtype=float)
# Now loop through the different reanalysis products, density-correct wind speeds, and take monthly averages
for key in self._reanal_products:
rean_df = self._plant._reanalysis._product[key].df
rean_df['ws_dens_corr'] = mt.air_density_adjusted_wind_speed(rean_df['windspeed_ms'],
rean_df['rho_kgm-3']) # Density correct wind speeds
self._reanalysis_aggregate[key] = rean_df.resample(self._resample_freq)['ws_dens_corr'].mean() # .to_frame() # Get average wind speed by year-month
if self.reg_winddirection | self.reg_temperature:
namescol = [key + '_' + var for var in self._rean_vars]
self._reanalysis_aggregate[namescol] = rean_df[self._rean_vars].resample(self._resample_freq).mean()
if self.reg_winddirection: # if wind direction is considered as regression variable
self._reanalysis_aggregate[key + '_wd'] = np.rad2deg(np.pi-(np.arctan2(-self._reanalysis_aggregate[key + '_u_ms'],self._reanalysis_aggregate[key + '_v_ms']))) # Calculate wind direction
self._aggregate.df = self._aggregate.df.join(
self._reanalysis_aggregate) # Merge monthly reanalysis data to monthly energy data frame
@logged_method_call
def trim_monthly_df(self):
"""
Remove first and/or last month of data if the raw data had an incomplete number of days
Args:
(None)
Returns:
(None)
"""
for p in self._aggregate.df.index[[0, -1]]: # Loop through 1st and last data entry
if self._aggregate.df.loc[p, 'num_days_expected'] != self._aggregate.df.loc[p, 'num_days_actual']:
self._aggregate.df.drop(p, inplace=True) # Drop the row from data frame
@logged_method_call
def calculate_long_term_losses(self):
"""
This function calculates long-term availability and curtailment losses based on the reported data grouped by the time resolution,
filtering for those data that are deemed representative of average plant performance.
Args:
(None)
Returns:
(None)
"""
df = self._aggregate.df
        # isolate availability and curtailment values that are representative of average plant performance
avail_valid = df.loc[df['availability_typical'],'availability_pct'].to_frame()
curt_valid = df.loc[df['curtailment_typical'],'curtailment_pct'].to_frame()
# Now get average percentage losses by month or day
avail_long_term = self.groupby_time_res(avail_valid)['availability_pct']
curt_long_term = self.groupby_time_res(curt_valid)['curtailment_pct']
# Ensure there are 12 or 365 data points in long-term average. If not, throw an exception:
if (avail_long_term.shape[0] < int(self._calendar_samples)):
raise Exception('Not all calendar days/months represented in long-term availability calculation')
if (curt_long_term.shape[0] < int(self._calendar_samples)):
raise Exception('Not all calendar days/months represented in long-term curtailment calculation')
self.long_term_losses = (avail_long_term, curt_long_term)
def setup_monte_carlo_inputs(self):
"""
Create and populate the data frame defining the simulation parameters.
This data frame is stored as self._inputs
Args:
(None)
Returns:
(None)
"""
# Create extra long list of renanalysis product names to sample from
reanal_list = list(np.repeat(self.reanal_subset, self.num_sim))
inputs = {
"reanalysis_product": np.asarray(random.sample(reanal_list, self.num_sim)),
"metered_energy_fraction": np.random.normal(1, self.uncertainty_meter, self.num_sim),
"loss_fraction": np.random.normal(1, self.uncertainty_losses, self.num_sim),
"num_years_windiness": np.random.randint(self.uncertainty_windiness[0],self.uncertainty_windiness[1] + 1, self.num_sim),
"loss_threshold":np.random.randint(self.uncertainty_loss_max[0], self.uncertainty_loss_max[1] + 1, self.num_sim) / 100.,
}
self._inputs = pd.DataFrame(inputs)
@logged_method_call
def filter_outliers(self, n):
"""
This function filters outliers based on a combination of range filter, unresponsive sensor filter,
and window filter.
We use a memoized funciton to store the regression data in a dictionary for each combination as it
comes up in the Monte Carlo simulation. This saves significant computational time in not having to run
robust linear regression for each Monte Carlo iteration
Args:
            n(:obj:`int`): The Monte Carlo iteration number
Returns:
:obj:`pandas.DataFrame`: Filtered monthly/daily data ready for linear regression
"""
reanal = self._run.reanalysis_product
# Check if valid data has already been calculated and stored. If so, just return it
if (reanal, self._run.loss_threshold) in self.outlier_filtering:
valid_data = self.outlier_filtering[(reanal, self._run.loss_threshold)]
return valid_data
# If valid data hasn't yet been stored in dictionary, determine the valid data
df = self._aggregate.df
# First set of filters checking combined losses and if the Nan data flag was on
df_sub = df.loc[
((df['availability_pct'] + df['curtailment_pct']) < self._run.loss_threshold) & (df['nan_flag'] == False),:]
# Set maximum range for using bin-filter, convert from MW to GWh
plant_capac = self._plant._plant_capacity/1000. * self._hours_in_res
# Apply range filter to wind speed
df_sub = df_sub.assign(flag_range=filters.range_flag(df_sub[reanal], below = 0, above = 40))
# Apply frozen/unresponsive sensor filter
df_sub.loc[:,'flag_frozen'] = filters.unresponsive_flag(df_sub[reanal], threshold = 3)
# Apply window range filter
df_sub.loc[:,'flag_window'] = filters.window_range_flag(window_col = df_sub[reanal],
window_start = 5.,
window_end = 40,
value_col = df_sub['energy_gwh'],
value_min = 0.02*plant_capac,
value_max = 1.2*plant_capac)
# Create a 'final' flag which is true if any of the previous flags are true
df_sub.loc[:,'flag_final'] = (df_sub.loc[:, 'flag_range']) | (df_sub.loc[:, 'flag_frozen']) | \
(df_sub.loc[:, 'flag_window'])
# Define valid data
valid_data = df_sub.loc[df_sub.loc[:, 'flag_final'] == False, [reanal,
'energy_gwh', 'availability_gwh',
'curtailment_gwh']]
if self.reg_winddirection:
valid_data_to_add = df_sub.loc[df_sub.loc[:, 'flag_final'] == False, [reanal + '_wd',
reanal + '_u_ms', reanal + '_v_ms']]
valid_data = pd.concat([valid_data, valid_data_to_add], axis=1)
if self.reg_temperature:
valid_data_to_add = df_sub.loc[df_sub.loc[:, 'flag_final'] == False, [reanal + '_temperature_K']]
valid_data = pd.concat([valid_data, valid_data_to_add], axis=1)
if self.time_resolution == 'M':
valid_data_to_add = df_sub.loc[df_sub.loc[:, 'flag_final'] == False, ['num_days_expected']]
valid_data = pd.concat([valid_data, valid_data_to_add], axis=1)
# Update the dictionary
self.outlier_filtering[(reanal, self._run.loss_threshold)] = valid_data
# Return result
return valid_data
@logged_method_call
def set_regression_data(self, n):
"""
This will be called for each iteration of the Monte Carlo simulation and will do the following:
        1. Randomly sample monthly/daily revenue meter, availability, and curtailment data based on specified uncertainties
and correlations
2. Randomly choose one reanalysis product
3. Calculate gross energy from randomized energy data
4. Normalize gross energy to 30-day months
5. Filter results to remove months/days with NaN data and with combined losses that exceed the Monte Carlo
sampled max threshold
6. Return the wind speed and normalized gross energy to be used in the regression relationship
Args:
n(:obj:`int`): The Monte Carlo iteration number
Returns:
:obj:`pandas.Series`: Monte-Carlo sampled wind speeds and other variables (temperature, wind direction) if used in the regression
:obj:`pandas.Series`: Monte-Carlo sampled normalized gross energy
"""
# Get data to use in regression based on filtering result
reg_data = self.filter_outliers(n)
# Now monte carlo sample the data
mc_energy = reg_data['energy_gwh'] * self._run.metered_energy_fraction # Create new Monte-Carlo sampled data frame and sample energy data
mc_availability = reg_data['availability_gwh'] * self._run.loss_fraction # Calculate MC-generated availability
mc_curtailment = reg_data['curtailment_gwh'] * self._run.loss_fraction # Calculate MC-generated curtailment
# Calculate gross energy and normalize to 30 days
mc_gross_energy = mc_energy + mc_availability + mc_curtailment
if self.time_resolution == 'M':
num_days_expected = reg_data['num_days_expected']
mc_gross_norm = mc_gross_energy * 30 / num_days_expected # Normalize gross energy to 30-day months
elif self.time_resolution == 'D':
mc_gross_norm = mc_gross_energy
# Set reanalysis product
reg_inputs = reg_data[self._run.reanalysis_product] # Copy wind speed data to Monte Carlo data frame
if self.reg_temperature: # if temperature is considered as regression variable
mc_temperature = reg_data[self._run.reanalysis_product + "_temperature_K"] # Copy temperature data to Monte Carlo data frame
reg_inputs = pd.concat([reg_inputs,mc_temperature], axis = 1)
if self.reg_winddirection: # if wind direction is considered as regression variable
mc_wind_direction = reg_data[self._run.reanalysis_product + "_wd"] # Copy wind direction data to Monte Carlo data frame
reg_inputs = pd.concat([reg_inputs,np.sin(np.deg2rad(mc_wind_direction))], axis = 1)
reg_inputs = pd.concat([reg_inputs,np.cos(np.deg2rad(mc_wind_direction))], axis = 1)
reg_inputs = pd.concat([reg_inputs,mc_gross_norm], axis = 1)
# Return values needed for regression
return reg_inputs # Return randomly sampled wind speed, wind direction, temperature and normalized gross energy
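# Note on the wind-direction features above: direction is a circular variable, so it enters the
# regression as the pair (sin, cos) rather than as raw degrees. Illustrative standalone check
# (made-up values, not data from this class):
#
#     import numpy as np
#     wd = np.array([350.0, 10.0])  # nearly identical directions across the 0/360 wrap
#     feats = np.column_stack([np.sin(np.deg2rad(wd)), np.cos(np.deg2rad(wd))])
#     # the two feature rows are close together, unlike the raw values 350 and 10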
@logged_method_call
def run_regression(self, n):
"""
Run robust linear regression between Monte-Carlo generated monthly/daily gross energy,
wind speed, temperature and wind direction (if used)
Args:
n(:obj:`int`): The Monte Carlo iteration number
Returns:
:obj:`?`: trained regression model
"""
reg_data = self.set_regression_data(n) # Get regression data
# Bootstrap input data to incorporate some regression uncertainty
reg_data = np.array(reg_data.sample(frac = 1.0, replace = True))
# Update Monte Carlo tracker fields
self._mc_num_points[n] = np.shape(reg_data)[0]
# Run regression. Note, the last column of reg_data is the target variable for the regression
# Linear regression
if self.reg_model == 'lin':
reg = LinearRegression().fit(np.array(reg_data[:,0:-1]), reg_data[:,-1])
predicted_y = reg.predict(np.array(reg_data[:,0:-1]))
self._mc_slope[n,:] = reg.coef_
self._mc_intercept[n] = float(reg.intercept_)
self._r2_score[n] = r2_score(reg_data[:,-1], predicted_y)
self._mse_score[n] = mean_squared_error(reg_data[:,-1], predicted_y)
return reg
# Machine learning models
else:
ml = MachineLearningSetup(self.reg_model, **self.ml_setup_kwargs)
# Memoized approach for optimized hyperparameters
if self._run.reanalysis_product in self.opt_model:
self.opt_model[(self._run.reanalysis_product)].fit(np.array(reg_data[:,0:-1]), reg_data[:,-1])
""" Unit tests for opt_probs.py"""
# Author: <NAME>
# License: BSD 3 clause
try:
import mlrose_hiive
except:
import sys
sys.path.append("..")
import unittest
import numpy as np
from mlrose_hiive import OneMax, DiscreteOpt, ContinuousOpt, TSPOpt, OnePointCrossOver
# The following functions/classes are not automatically imported at
# initialization, so must be imported explicitly from neural.py,
# activation.py and opt_probs.py
from mlrose_hiive.neural import NetworkWeights
from mlrose_hiive.neural.activation import identity
from mlrose_hiive.opt_probs._opt_prob import _OptProb as OptProb
class TestOptProb(unittest.TestCase):
"""Tests for _OptProb class."""
@staticmethod
def test_set_state_max():
"""Test set_state method for a maximization problem"""
problem = OptProb(5, OneMax(), maximize=True)
x = np.array([0, 1, 2, 3, 4])
problem.set_state(x)
assert (np.array_equal(problem.get_state(), x)
and problem.get_fitness() == 10)
@staticmethod
def test_set_state_min():
"""Test set_state method for a minimization problem"""
problem = OptProb(5, OneMax(), maximize=False)
x = np.array([0, 1, 2, 3, 4])
problem.set_state(x)
assert (np.array_equal(problem.get_state(), x)
and problem.get_fitness() == -10)
@staticmethod
def test_set_population_max():
"""Test set_population method for a maximization problem"""
problem = OptProb(5, OneMax(), maximize=True)
pop = np.array([[0, 0, 0, 0, 1],
[1, 0, 1, 0, 1],
[1, 1, 1, 1, 0],
[1, 0, 0, 0, 1],
[100, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[0, 0, 0, 0, -50]])
pop_fit = np.array([1, 3, 4, 2, 100, 0, 5, -50])
problem.set_population(pop)
assert (np.array_equal(problem.get_population(), pop)
and np.array_equal(problem.get_pop_fitness(), pop_fit))
@staticmethod
def test_set_population_min():
"""Test set_population method for a minimization problem"""
problem = OptProb(5, OneMax(), maximize=False)
pop = np.array([[0, 0, 0, 0, 1],
[1, 0, 1, 0, 1],
[1, 1, 1, 1, 0],
[1, 0, 0, 0, 1],
[100, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[0, 0, 0, 0, -50]])
pop_fit = -1.0*np.array([1, 3, 4, 2, 100, 0, 5, -50])
problem.set_population(pop)
assert (np.array_equal(problem.get_population(), pop)
and np.array_equal(problem.get_pop_fitness(), pop_fit))
@staticmethod
def test_best_child_max():
"""Test best_child method for a maximization problem"""
problem = OptProb(5, OneMax(), maximize=True)
pop = np.array([[0, 0, 0, 0, 1],
[1, 0, 1, 0, 1],
[1, 1, 1, 1, 0],
[1, 0, 0, 0, 1],
[100, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[0, 0, 0, 0, -50]])
problem.set_population(pop)
x = problem.best_child()
assert np.array_equal(x, np.array([100, 0, 0, 0, 0]))
@staticmethod
def test_best_child_min():
"""Test best_child method for a minimization problem"""
problem = OptProb(5, OneMax(), maximize=False)
pop = np.array([[0, 0, 0, 0, 1],
[1, 0, 1, 0, 1],
[1, 1, 1, 1, 0],
[1, 0, 0, 0, 1],
[100, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[0, 0, 0, 0, -50]])
problem.set_population(pop)
x = problem.best_child()
assert np.array_equal(x, np.array([0, 0, 0, 0, -50]))
@staticmethod
def test_best_neighbor_max():
"""Test best_neighbor method for a maximization problem"""
problem = OptProb(5, OneMax(), maximize=True)
pop = np.array([[0, 0, 0, 0, 1],
[1, 0, 1, 0, 1],
[1, 1, 1, 1, 0],
[1, 0, 0, 0, 1],
[100, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[0, 0, 0, 0, -50]])
problem.neighbors = pop
x = problem.best_neighbor()
assert np.array_equal(x, np.array([100, 0, 0, 0, 0]))
@staticmethod
def test_best_neighbor_min():
"""Test best_neighbor method for a minimization problem"""
problem = OptProb(5, OneMax(), maximize=False)
pop = np.array([[0, 0, 0, 0, 1],
[1, 0, 1, 0, 1],
[1, 1, 1, 1, 0],
[1, 0, 0, 0, 1],
[100, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[0, 0, 0, 0, -50]])
problem.neighbors = pop
x = problem.best_neighbor()
assert np.array_equal(x, np.array([0, 0, 0, 0, -50]))
@staticmethod
def test_eval_fitness_max():
"""Test eval_fitness method for a maximization problem"""
problem = OptProb(5, OneMax(), maximize=True)
x = np.array([0, 1, 2, 3, 4])
fitness = problem.eval_fitness(x)
assert fitness == 10
@staticmethod
def test_eval_fitness_min():
"""Test eval_fitness method for a minimization problem"""
problem = OptProb(5, OneMax(), maximize=False)
x = np.array([0, 1, 2, 3, 4])
fitness = problem.eval_fitness(x)
assert fitness == -10
@staticmethod
def test_eval_mate_probs():
"""Test eval_mate_probs method"""
problem = OptProb(5, OneMax(), maximize=True)
pop = np.array([[0, 0, 0, 0, 1],
[1, 0, 1, 0, 1],
[1, 1, 1, 1, 0],
[1, 0, 0, 0, 1],
[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1]])
problem.set_population(pop)
problem.eval_mate_probs()
probs = np.array([0.06667, 0.2, 0.26667, 0.13333, 0, 0.33333])
assert np.allclose(problem.get_mate_probs(), probs, atol=0.00001)
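# Arithmetic check of the expected values above (assuming mate probabilities are proportional to
# fitness): the population fitnesses are [1, 3, 4, 2, 0, 5], which sum to 15, so the expected
# probabilities are [1/15, 3/15, 4/15, 2/15, 0/15, 5/15] ~= [0.06667, 0.2, 0.26667, 0.13333, 0, 0.33333].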
@staticmethod
def test_eval_mate_probs_maximize_false():
"""Test eval_mate_probs method"""
problem = OptProb(5, OneMax(), maximize=False)
pop = np.array([[0, 0, 0, 0, 1],
[1, 0, 1, 0, 1],
[1, 1, 1, 1, 0],
[1, 0, 0, 0, 1],
[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1]])
problem.set_population(pop)
problem.eval_mate_probs()
probs = np.array([0.26667, 0.13333, 0.06667, 0.2, 0.33333, 0])
assert np.allclose(problem.get_mate_probs(), probs, atol=0.00001)
@staticmethod
def test_eval_mate_probs_all_zero():
"""Test eval_mate_probs method when all states have zero fitness"""
problem = OptProb(5, OneMax(), maximize=True)
pop = np.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
problem.set_population(pop)
problem.eval_mate_probs()
probs = np.array([0.16667, 0.16667, 0.16667, 0.16667,
0.16667, 0.16667])
assert np.allclose(problem.get_mate_probs(), probs, atol=0.00001)
class TestDiscreteOpt(unittest.TestCase):
"""Tests for DiscreteOpt class."""
@staticmethod
def test_eval_node_probs():
"""Test eval_node_probs method"""
problem = DiscreteOpt(5, OneMax(), maximize=True)
pop = np.array([[0, 0, 0, 0, 1],
[1, 0, 1, 0, 1],
[1, 1, 1, 1, 0],
[1, 0, 0, 0, 1],
[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1]])
problem.keep_sample = pop
problem.eval_node_probs()
parent = np.array([2, 0, 1, 0])
probs = np.array([[[0.33333, 0.66667],
[0.33333, 0.66667]],
[[1.0, 0.0],
[0.33333, 0.66667]],
[[1.0, 0.0],
[0.25, 0.75]],
[[1.0, 0.0],
[0.0, 1.0]],
[[0.5, 0.5],
[0.25, 0.75]]])
assert (np.allclose(problem.node_probs, probs, atol=0.00001)
and np.array_equal(problem.parent_nodes, parent))
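# Worked check of one entry above (reading node_probs[i][j] as the distribution of node i given its
# parent equals j, an assumption about the indexing): node 2 has parent node 0, and in the four
# samples with node 0 == 1 the values of node 2 are [1, 1, 0, 1], i.e. P(0) = 0.25 and P(1) = 0.75,
# matching the second row of the third block of `probs`.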
@staticmethod
def test_find_neighbors_max2():
"""Test find_neighbors method when max_val is equal to 2"""
problem = DiscreteOpt(5, OneMax(), maximize=True, max_val=2)
x = np.array([0, 1, 0, 1, 0])
problem.set_state(x)
problem.find_neighbors()
neigh = np.array([[1, 1, 0, 1, 0],
[0, 0, 0, 1, 0],
[0, 1, 1, 1, 0],
[0, 1, 0, 0, 0],
[0, 1, 0, 1, 1]])
assert np.array_equal(np.array(problem.neighbors), neigh)
@staticmethod
def test_find_neighbors_max_gt2():
"""Test find_neighbors method when max_val is greater than 2"""
problem = DiscreteOpt(5, OneMax(), maximize=True, max_val=3)
x = np.array([0, 1, 2, 1, 0])
problem.set_state(x)
problem.find_neighbors()
neigh = np.array([[1, 1, 2, 1, 0],
[2, 1, 2, 1, 0],
[0, 0, 2, 1, 0],
[0, 2, 2, 1, 0],
[0, 1, 0, 1, 0],
[0, 1, 1, 1, 0],
[0, 1, 2, 0, 0],
[0, 1, 2, 2, 0],
[0, 1, 2, 1, 1],
[0, 1, 2, 1, 2]])
assert np.array_equal(np.array(problem.neighbors), neigh)
@staticmethod
def test_find_sample_order():
"""Test find_sample_order method"""
problem = DiscreteOpt(5, OneMax(), maximize=True)
problem.parent_nodes = np.array([2, 0, 1, 0])
order = np.array([0, 2, 4, 1, 3])
problem.find_sample_order()
assert np.array_equal(np.array(problem.sample_order), order)
@staticmethod
def test_find_top_pct_max():
"""Test find_top_pct method for a maximization problem"""
problem = DiscreteOpt(5, OneMax(), maximize=True)
pop = np.array([[0, 0, 0, 0, 1],
[1, 0, 1, 0, 1],
[1, 1, 1, 1, 0],
[1, 0, 0, 0, 1],
[100, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[0, 0, 0, 0, -50]])
problem.set_population(pop)
problem.find_top_pct(keep_pct=0.25)
x = np.array([[100, 0, 0, 0, 0],
[1, 1, 1, 1, 1]])
assert np.array_equal(problem.get_keep_sample(), x)
@staticmethod
def test_find_top_pct_min():
"""Test find_top_pct method for a minimization problem"""
problem = DiscreteOpt(5, OneMax(), maximize=False)
pop = np.array([[0, 0, 0, 0, 1],
[1, 0, 1, 0, 1],
[1, 1, 1, 1, 0],
[1, 0, 0, 0, 1],
[100, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[0, 0, 0, 0, -50]])
problem.set_population(pop)
problem.find_top_pct(keep_pct=0.25)
x = np.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, -50]])
assert np.array_equal(problem.get_keep_sample(), x)
@staticmethod
def test_random():
"""Test random method"""
problem = DiscreteOpt(5, OneMax(), maximize=True, max_val=5)
rand = problem.random()
assert (len(rand) == 5 and min(rand) >= 0 and max(rand) <= 4)
@staticmethod
def test_random_neighbor_max2():
"""Test random_neighbor method when max_val is equal to 2"""
problem = DiscreteOpt(5, OneMax(), maximize=True)
x = np.array([0, 0, 1, 1, 1])
problem.set_state(x)
neigh = problem.random_neighbor()
sum_diff = np.sum(np.abs(x - neigh))
assert (len(neigh) == 5 and sum_diff == 1)
@staticmethod
def test_random_neighbor_max_gt2():
"""Test random_neighbor method when max_val is greater than 2"""
problem = DiscreteOpt(5, OneMax(), maximize=True, max_val=5)
x = np.array([0, 1, 2, 3, 4])
problem.set_state(x)
neigh = problem.random_neighbor()
abs_diff = np.abs(x - neigh)
abs_diff[abs_diff > 0] = 1
sum_diff = np.sum(abs_diff)
assert (len(neigh) == 5 and sum_diff == 1)
@staticmethod
def test_random_pop():
"""Test random_pop method"""
problem = DiscreteOpt(5, OneMax(), maximize=True)
problem.random_pop(100)
pop = problem.get_population()
pop_fitness = problem.get_pop_fitness()
assert (np.shape(pop)[0] == 100 and np.shape(pop)[1] == 5
and np.sum(pop) > 0 and np.sum(pop) < 500
and len(pop_fitness) == 100)
@staticmethod
def test_reproduce_mut0():
"""Test reproduce method when mutation_prob is 0"""
problem = DiscreteOpt(5, OneMax(), maximize=True)
father = np.array([0, 0, 0, 0, 0])
mother = np.array([1, 1, 1, 1, 1])
child = problem.reproduce(father, mother, mutation_prob=0)
assert (len(child) == 5 and sum(child) >= 0 and sum(child) <= 5)
@staticmethod
def test_reproduce_mut1_max2():
"""Test reproduce method when mutation_prob is 1 and max_val is 2"""
problem = DiscreteOpt(5, OneMax(), maximize=True)
father = np.array([0, 0, 0, 0, 0])
mother = np.array([1, 1, 1, 1, 1])
child = problem.reproduce(father, mother, mutation_prob=1)
assert (len(child) == 5 and sum(child) >= 0 and sum(child) <= 5)
@staticmethod
def test_reproduce_mut1_max_gt2():
"""Test reproduce method when mutation_prob is 1 and max_val is
greater than 2"""
problem = DiscreteOpt(5, OneMax(),
maximize=True,
max_val=3)
problem._crossover = OnePointCrossOver(problem)
father = np.array([0, 0, 0, 0, 0])
mother = np.array([2, 2, 2, 2, 2])
child = problem.reproduce(father, mother, mutation_prob=1)
assert (len(child) == 5 and sum(child) > 0 and sum(child) < 10)
@staticmethod
def test_sample_pop():
"""Test sample_pop method"""
problem = DiscreteOpt(5, OneMax(), maximize=True)
pop = np.array([[0, 0, 0, 0, 1],
[1, 0, 1, 0, 1],
[1, 1, 1, 1, 0],
[1, 0, 0, 0, 1],
[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1]])
problem.keep_sample = pop
problem.eval_node_probs()
sample = problem.sample_pop(100)
assert (np.shape(sample)[0] == 100 and np.shape(sample)[1] == 5
and np.sum(sample) > 0 and np.sum(sample) < 500)
class TestContinuousOpt(unittest.TestCase):
"""Tests for ContinuousOpt class."""
@staticmethod
def test_calculate_updates():
"""Test calculate_updates method"""
X = np.array([[0, 1, 0, 1],
[0, 0, 0, 0],
[1, 1, 1, 1],
[1, 1, 1, 1],
[0, 0, 1, 1],
[1, 0, 0, 0]])
y = np.reshape(np.array([1, 1, 0, 0, 1, 1]), [6, 1])
nodes = [4, 2, 1]
fitness = NetworkWeights(X, y, nodes, activation=identity,
bias=False, is_classifier=False,
learning_rate=1)
a = list(np.arange(8) + 1)
b = list(0.01*(np.arange(2) + 1))
weights = a + b
fitness.evaluate(weights)
problem = ContinuousOpt(10, fitness, maximize=False)
updates = problem.calculate_updates()
update1 = np.array([[-0.0017, -0.0034],
[-0.0046, -0.0092],
[-0.0052, -0.0104],
[0.0014, 0.0028]])
update2 = np.array([[-3.17],
[-4.18]])
assert (np.allclose(updates[0], update1, atol=0.001)
and np.allclose(updates[1], update2, atol=0.001))
@staticmethod
def test_find_neighbors_range_eq_step():
"""Test find_neighbors method when range equals step size"""
problem = ContinuousOpt(5, OneMax(), maximize=True,
min_val=0, max_val=1, step=1)
x = np.array([0, 1, 0, 1, 0])
problem.set_state(x)
problem.find_neighbors()
neigh = np.array([[1, 1, 0, 1, 0],
[0, 0, 0, 1, 0],
[0, 1, 1, 1, 0],
[0, 1, 0, 0, 0],
[0, 1, 0, 1, 1]])
assert np.array_equal(np.array(problem.neighbors), neigh)
@staticmethod
def test_find_neighbors_range_gt_step():
"""Test find_neighbors method when range greater than step size"""
problem = ContinuousOpt(5, OneMax(), maximize=True,
min_val=0, max_val=2, step=1)
x = np.array([0, 1, 2, 1, 0])
problem.set_state(x)
problem.find_neighbors()
neigh = np.array([[1, 1, 2, 1, 0],
[0, 0, 2, 1, 0],
[0, 2, 2, 1, 0],
[0, 1, 1, 1, 0],
[0, 1, 2, 0, 0],
[0, 1, 2, 2, 0],
[0, 1, 2, 1, 1]])
assert np.array_equal(np.array(problem.neighbors), neigh)
@staticmethod
def test_random():
"""Test random method"""
problem = ContinuousOpt(5, OneMax(), maximize=True,
min_val=0, max_val=4)
rand = problem.random()
assert (len(rand) == 5 and min(rand) >= 0 and max(rand) <= 4)
@staticmethod
def test_random_neighbor_range_eq_step():
"""Test random_neighbor method when range equals step size"""
problem = ContinuousOpt(5, OneMax(), maximize=True,
min_val=0, max_val=1, step=1)
x = np.array([0, 0, 1, 1, 1])
problem.set_state(x)
neigh = problem.random_neighbor()
sum_diff = np.sum(np.abs(x - neigh))
assert (len(neigh) == 5 and sum_diff == 1)
@staticmethod
def test_random_neighbor_range_gt_step():
"""Test random_neighbor method when range greater than step size"""
problem = ContinuousOpt(5, OneMax(), maximize=True,
min_val=0, max_val=2, step=1)
x = np.array([0, 1, 2, 3, 4])
problem.set_state(x)
neigh = problem.random_neighbor()
abs_diff = np.abs(x - neigh)
abs_diff[abs_diff > 0] = 1
sum_diff = np.sum(abs_diff)
assert (len(neigh) == 5 and sum_diff == 1)
@staticmethod
def test_random_pop():
"""Test random_pop method"""
problem = ContinuousOpt(5, OneMax(), maximize=True,
min_val=0, max_val=1, step=1)
problem.random_pop(100)
pop = problem.get_population()
pop_fitness = problem.get_pop_fitness()
assert (np.shape(pop)[0] == 100 and np.shape(pop)[1] == 5
and np.sum(pop) > 0 and np.sum(pop) < 500
and len(pop_fitness) == 100)
@staticmethod
def test_reproduce_mut0():
"""Test reproduce method when mutation_prob is 0"""
problem = ContinuousOpt(5, OneMax(), maximize=True,
min_val=0, max_val=1, step=1)
father = np.array([0, 0, 0, 0, 0])
mother = np.array([1, 1, 1, 1, 1])
child = problem.reproduce(father, mother, mutation_prob=0)
assert (len(child) == 5 and sum(child) > 0 and sum(child) < 5)
@staticmethod
def test_reproduce_mut1_range_eq_step():
"""Test reproduce method when mutation_prob is 1 and range equals
step size"""
problem = ContinuousOpt(5, OneMax(), maximize=True,
min_val=0, max_val=1, step=1)
father = np.array([0, 0, 0, 0, 0])
mother = np.array([1, 1, 1, 1, 1])
child = problem.reproduce(father, mother, mutation_prob=1)
assert (len(child) == 5 and sum(child) > 0 and sum(child) < 5)
@staticmethod
def test_reproduce_mut1_range_gt_step():
"""Test reproduce method when mutation_prob is 1 and range is
greater than step size"""
problem = ContinuousOpt(5, OneMax(), maximize=True,
min_val=0, max_val=2, step=1)
father = np.array([0, 0, 0, 0, 0])
mother = np.array([2, 2, 2, 2, 2])
child = problem.reproduce(father, mother, mutation_prob=1)
assert (len(child) == 5 and sum(child) > 0 and sum(child) < 10)
@staticmethod
def test_update_state_in_range():
"""Test update_state method where all updated values are within the
tolerated range"""
problem = ContinuousOpt(5, OneMax(), maximize=True,
min_val=0, max_val=20, step=1)
x = np.array([0, 1, 2, 3, 4])
problem.set_state(x)
y = np.array([2, 4, 6, 8, 10])
updated = problem.update_state(y)
assert np.array_equal(updated, (x + y))
@staticmethod
def test_update_state_outside_range():
"""Test update_state method where some updated values are outside the
tolerated range"""
problem = ContinuousOpt(5, OneMax(), maximize=True,
min_val=0, max_val=5, step=1)
x = np.array([0, 1, 2, 3, 4])
problem.set_state(x)
y = np.array([2, -4, 6, -8, 10])
updated = problem.update_state(y)
z = np.array([2, 0, 5, 0, 5])
assert np.array_equal(updated, z)
class TestTSPOpt(unittest.TestCase):
"""Tests for TSPOpt class."""
@staticmethod
def test_adjust_probs_all_zero():
"""Test adjust_probs method when all elements in input vector sum to
zero."""
dists = [(0, 1, 3), (0, 2, 5), (0, 3, 1), (0, 4, 7), (1, 3, 6),
(4, 1, 9), (2, 3, 8), (2, 4, 2), (3, 2, 8), (3, 4, 4)]
problem = TSPOpt(5, distances=dists)
probs = np.zeros(5)
assert np.array_equal(problem.adjust_probs(probs), np.zeros(5))
@staticmethod
def test_adjust_probs_non_zero():
"""Test adjust_probs method when all elements in input vector sum to
some value other than zero."""
dists = [(0, 1, 3), (0, 2, 5), (0, 3, 1), (0, 4, 7), (1, 3, 6),
(4, 1, 9), (2, 3, 8), (2, 4, 2), (3, 2, 8), (3, 4, 4)]
problem = TSPOpt(5, distances=dists)
probs = np.array([0.1, 0.2, 0, 0, 0.5])
x = np.array([0.125, 0.25, 0, 0, 0.625])
assert np.array_equal(problem.adjust_probs(probs), x)
@staticmethod
def test_find_neighbors():
"""Test find_neighbors method"""
dists = [(0, 1, 3), (0, 2, 5), (0, 3, 1), (0, 4, 7), (1, 3, 6),
(4, 1, 9), (2, 3, 8), (2, 4, 2), (3, 2, 8), (3, 4, 4)]
problem = TSPOpt(5, distances=dists)
x = np.array([0, 1, 2, 3, 4])
problem.set_state(x)
problem.find_neighbors()
neigh = np.array([[1, 0, 2, 3, 4],
[2, 1, 0, 3, 4],
[3, 1, 2, 0, 4],
[4, 1, 2, 3, 0],
[0, 2, 1, 3, 4],
[0, 3, 2, 1, 4],
[0, 4, 2, 3, 1],
[0, 1, 3, 2, 4],
[0, 1, 4, 3, 2],
[0, 1, 2, 4, 3]])
assert np.array_equal(np.array(problem.neighbors), neigh)
@staticmethod
def test_random():
"""Test random method"""
dists = [(0, 1, 3), (0, 2, 5), (0, 3, 1), (0, 4, 7), (1, 3, 6),
(4, 1, 9), (2, 3, 8), (2, 4, 2), (3, 2, 8), (3, 4, 4)]
problem = TSPOpt(5, distances=dists)
rand = problem.random()
assert (len(rand) == 5 and len(set(rand)) == 5)
@staticmethod
def test_random_mimic():
"""Test random_mimic method"""
dists = [(0, 1, 3), (0, 2, 5), (0, 3, 1), (0, 4, 7), (1, 3, 6),
(4, 1, 9), (2, 3, 8), (2, 4, 2), (3, 2, 8), (3, 4, 4)]
pop = np.array([[1, 0, 3, 2, 4],
[0, 2, 1, 3, 4],
[0, 2, 4, 3, 1],
[4, 1, 3, 2, 0],
[3, 4, 0, 2, 1],
[2, 4, 0, 3, 1]])
problem = TSPOpt(5, distances=dists)
problem.keep_sample = pop
problem.eval_node_probs()
problem.find_sample_order()
rand = problem.random_mimic()
assert (len(rand) == 5 and len(set(rand)) == 5)
@staticmethod
def test_random_neighbor():
"""Test random_neighbor method"""
dists = [(0, 1, 3), (0, 2, 5), (0, 3, 1), (0, 4, 7), (1, 3, 6),
(4, 1, 9), (2, 3, 8), (2, 4, 2), (3, 2, 8), (3, 4, 4)]
problem = TSPOpt(5, distances=dists)
x = np.array([0, 1, 2, 3, 4])
problem.set_state(x)
neigh = problem.random_neighbor()
abs_diff = np.abs(x - neigh)
abs_diff[abs_diff > 0] = 1
sum_diff = np.sum(abs_diff)
assert (len(neigh) == 5 and sum_diff == 2 and len(set(neigh)) == 5)
@staticmethod
def test_reproduce_mut0():
"""Test reproduce method when mutation_prob is 0"""
dists = [(0, 1, 3), (0, 2, 5), (0, 3, 1), (0, 4, 7), (1, 3, 6),
(4, 1, 9), (2, 3, 8), (2, 4, 2), (3, 2, 8), (3, 4, 4)]
problem = TSPOpt(5, distances=dists)
father = np.array([0, 1, 2, 3, 4])
mother = np.array([0, 4, 3, 2, 1])
child = problem.reproduce(father, mother, mutation_prob=0)
assert (len(child) == 5 and len(set(child)) == 5)
@staticmethod
def test_reproduce_mut1():
"""Test reproduce method when mutation_prob is 1"""
dists = [(0, 1, 3), (0, 2, 5), (0, 3, 1), (0, 4, 7), (1, 3, 6),
(4, 1, 9), (2, 3, 8), (2, 4, 2), (3, 2, 8), (3, 4, 4)]
problem = TSPOpt(5, distances=dists)
father = np.array([0, 1, 2, 3, 4])
mother = np.array([4, 3, 2, 1, 0])
child = problem.reproduce(father, mother, mutation_prob=1)
assert (len(child) == 5 and len(set(child)) == 5)
@staticmethod
def test_sample_pop():
"""Test sample_pop method"""
dists = [(0, 1, 3), (0, 2, 5), (0, 3, 1), (0, 4, 7), (1, 3, 6),
(4, 1, 9), (2, 3, 8), (2, 4, 2), (3, 2, 8), (3, 4, 4)]
pop = np.array([[1, 0, 3, 2, 4],
[0, 2, 1, 3, 4],
[0, 2, 4, 3, 1],
[4, 1, 3, 2, 0],
[3, 4, 0, 2, 1],
[2, 4, 0, 3, 1]])
problem = TSPOpt(5, distances=dists)
problem.keep_sample = pop
problem.eval_node_probs()
sample = problem.sample_pop(100)
row_sums = np.sum(sample, axis=1)
assert (np.shape(sample)[0] == 100 and np.shape(sample)[1] == 5
and max(row_sums) == 10 and min(row_sums) == 10)
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 17 13:48:58 2015
@author: bcolsen
"""
from __future__ import division, print_function
import numpy as np
import pylab as plt
from .kde import kde
from scipy import stats
import sys
from io import BytesIO
import tempfile
#from gradient_bar import gbar
class ash:
def __init__(self, data, bin_num=None, shift_num=50, normed=True, force_scott = False, rule = 'scott'):
self.data_min = min(data)
self.data_max = max(data)
self.shift_num = shift_num
self.data = data
self.data_len = len(self.data)
self.normed=normed
##If None use KDE to autobin
if bin_num == None:
kde_result = kde(self.data)
if len(self.data) >= 50 and not force_scott and kde_result:
self.bw,self.kde_mesh,self.kde_den = kde_result
self.bins_from_bw()
self.bw2,self.kde_mesh,self.kde_den = kde(self.data, None, self.ash_mesh.min(), self.ash_mesh.max())
elif rule=='fd':
#print("Using FD rule")
kernel = stats.gaussian_kde(self.data)
self.bin_width = 2*(stats.iqr(self.data)/(len(self.data)**(1/3)))
self.bw_from_bin_width()
kernel.set_bandwidth(self.bw)
self.bins_from_bw()
self.kde_mesh = self.ash_mesh
self.kde_den = kernel(self.kde_mesh)
else:
#print("Using Scott's rule")
kernel = stats.gaussian_kde(self.data)
kernel.set_bandwidth(rule)
self.bw = kernel.factor * self.data.std() # kde factor is bandwidth scaled by sigma
self.bins_from_bw()
self.kde_mesh = self.ash_mesh
self.kde_den = kernel(self.kde_mesh)
else:
#print("Using bin number: ", bin_num)
self.set_bins(bin_num)
kernel = stats.gaussian_kde(self.data)
kernel.set_bandwidth(self.bw)
self.kde_mesh = self.ash_mesh
self.kde_den = kernel(self.kde_mesh)
#self.kde_mesh,self.kde_den
## KDE on same range as ASH
def set_bins(self,bin_num):
self.bin_num = bin_num
self.bin_width = (self.data_max-self.data_min)/self.bin_num
self.MIN = self.data_min - self.bin_width
self.MAX = self.data_max + self.bin_width
self.SHIFT = self.bin_width/self.shift_num
self.bw_from_bin_width()
self.calc_ash_den(self.normed)
self.calc_ash_unc()
def bins_from_bw(self):
self.bin_width = self.bw * np.sqrt(2*np.pi)  # bin width from the full width at half max of the bandwidth
self.bin_num = np.ceil(((self.data_max - self.data_min)/self.bin_width))
self.MIN = self.data_min - self.bin_width
self.MAX = self.data_min + self.bin_width*(self.bin_num + 1)
self.SHIFT = self.bin_width/self.shift_num
self.calc_ash_den(self.normed)
self.calc_ash_unc()#window at which 68.2% of the area is covered
def bw_from_bin_width(self):
self.bw = self.bin_width / np.sqrt(2*np.pi)
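# The two conversions above are exact inverses: bin_width = bw * sqrt(2*pi) and bw = bin_width / sqrt(2*pi).
# Numerically sqrt(2*pi) ~= 2.5066, so e.g. a bandwidth of 1.0 corresponds to a bin width of ~2.51; a flat
# bin of that width at the Gaussian kernel's peak height 1/(bw*sqrt(2*pi)) has the same unit area as the kernel.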
def calc_ash_den(self, normed=True):
self.ash_mesh = np.linspace(self.MIN,self.MAX,(self.bin_num+2)*self.shift_num)
self.ash_den = np.zeros_like(self.ash_mesh)
for i in range(self.shift_num):
hist_range = (self.MIN+i*self.SHIFT,self.MAX+i*self.SHIFT- self.bin_width)
hist, self.bin_edges = np.histogram(self.data,self.bin_num+1,range=hist_range,normed=normed)
#print(self.bin_edges[1]-self.bin_edges[0])
hist_mesh = np.ravel(np.meshgrid(hist,np.zeros(self.shift_num))[0],order='F')
self.ash_den = self.ash_den + np.r_[[0]*i,hist_mesh,[0]*(self.shift_num-i)] #pad hist_mesh with zeros and add
#print(ash_den)
self.ash_den = self.ash_den/self.shift_num #take the average
ash_den_index = np.where(self.ash_den > 0)
self.ash_mesh = self.ash_mesh[ash_den_index]
self.ash_den = self.ash_den[ash_den_index]
def calc_ash_unc(self):
'''window at which 68.2% of the area is covered'''
tot_area = np.trapz(self.ash_den, self.ash_mesh)
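# Illustrative sketch (assumption): one way to obtain a 68.2% window from a sampled density is to lower
# a threshold on the density until the enclosed area reaches 0.682 of the total, e.g.:
#
#     def hdi_window(mesh, den, frac=0.682):
#         total = np.trapz(den, mesh)
#         for level in np.sort(den)[::-1]:
#             sel = den >= level
#             if np.trapz(np.where(sel, den, 0.0), mesh) >= frac * total:
#                 return mesh[sel].min(), mesh[sel].max()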
import os
os.environ["OMP_NUM_THREADS"] = "4"
import numpy as np
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
from sklearn import svm
from sklearn.metrics import accuracy_score
import gzip
def make_tensor(data_xy):
data_x, data_y = data_xy
data_x = np.array(data_x, dtype=np.float32)
data_y = np.array(data_y, dtype=np.int32)
return data_x, data_y
def load_pickle(f):
try:
import cPickle as thepickle
except ImportError:
import _pickle as thepickle
try:
ret = thepickle.load(f, encoding='latin1')
except TypeError:
ret = thepickle.load(f)
return ret
def load_data(data_file):
print('loading data ...')
f = gzip.open(data_file, 'rb')
train_set, valid_set, test_set = load_pickle(f)
f.close()
train_set_x, train_set_y = make_tensor(train_set) # (50000, 784) (50000,)
valid_set_x, valid_set_y = make_tensor(valid_set) # (10000, 784) (10000,)
test_set_x, test_set_y = make_tensor(test_set) # (10000, 784) (10000,)
return train_set_x, train_set_y, valid_set_x, valid_set_y, test_set_x, test_set_y
class CCA:
def __init__(self, n_components=1, r1=1e-4, r2=1e-4):
self.n_components = n_components
self.r1 = r1
self.r2 = r2
self.w = [None, None]
self.m = [None, None]
def fit(self, X1, X2):
N = X1.shape[0]
f1 = X1.shape[1]
f2 = X2.shape[1]
self.m[0] = np.mean(X1, axis=0, keepdims=True) # [1, f1]
self.m[1] = np.mean(X2, axis=0, keepdims=True)
H1bar = X1 - self.m[0]
H2bar = X2 - self.m[1]
SigmaHat12 = (1.0 / (N - 1)) * np.dot(H1bar.T, H2bar)
SigmaHat11 = (1.0 / (N - 1)) * np.dot(H1bar.T, H1bar) + self.r1 * np.identity(f1)
SigmaHat22 = (1.0 / (N - 1)) * np.dot(H2bar.T, H2bar) + self.r2 * np.identity(f2)
[D1, V1] = np.linalg.eigh(SigmaHat11)
[D2, V2] = np.linalg.eigh(SigmaHat22)
SigmaHat11RootInv = np.dot(np.dot(V1, np.diag(D1 ** -0.5)), V1.T)
SigmaHat22RootInv = np.dot(np.dot(V2, np.diag(D2 ** -0.5)), V2.T)
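# For reference, the standard linear-CCA solution continues from the whitening matrices above by taking
# an SVD; the lines below are an assumed sketch of that continuation (reusing the same variable names):
#
#     Tval = np.dot(np.dot(SigmaHat11RootInv, SigmaHat12), SigmaHat22RootInv)
#     U, D, Vt = np.linalg.svd(Tval, full_matrices=False)
#     self.w[0] = np.dot(SigmaHat11RootInv, U[:, :self.n_components])    # projection for view 1
#     self.w[1] = np.dot(SigmaHat22RootInv, Vt.T[:, :self.n_components])  # projection for view 2
#     # D[:self.n_components] are the canonical correlations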
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.magic.sky.skysubtractor Contains the SkySubtractor class.
# -----------------------------------------------------------------
# Ensure Python 3 functionality
from __future__ import absolute_import, division, print_function
# Import standard modules
import io
import imageio
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from scipy.interpolate import CloughTocher2DInterpolator as intp
from scipy.interpolate import SmoothBivariateSpline
# Test
# from sklearn.preprocessing import PolynomialFeatures
# from sklearn.linear_model import LinearRegression
# from sklearn.pipeline import Pipeline
# Import astronomical modules
from photutils.background import Background
from astropy.modeling import models
from astropy.modeling.fitting import LevMarLSQFitter
from astropy import stats
# Import the relevant PTS classes and modules
from ..core.frame import Frame
from ..basics.mask import Mask
from ..core.source import Source
from ..basics.geometry import Coordinate, Circle, Composite
from ..basics.region import Region
from ..basics.skyregion import SkyRegion
from ..tools import plotting, statistics, fitting, plotting
from ...core.basics.configurable import OldConfigurable
from ...core.tools.logging import log
from ...core.basics.distribution import Distribution
# -----------------------------------------------------------------
class SkySubtractor(OldConfigurable):
"""
This class ...
"""
def __init__(self, config=None):
"""
The constructor ...
:param config:
:return:
"""
# Call the constructor of the base class
super(SkySubtractor, self).__init__(config, "magic")
# -- Attributes --
# The image frame
self.frame = None
# The mask of sources
self.sources_mask = None
# The extra mask
self.extra_mask = None
# The principal shape
self.principal_shape = None
# The region of saturated stars
self.saturation_region = None
# The animation
self.animation = None
# The sky region
self.region = None
# The output mask (combined input + bad mask + galaxy annulus mask + expanded saturation mask + sigma-clipping mask)
self.mask = None
# The estimated sky (a single floating point value or a Frame, depending on the estimation method)
self.sky = None
# The estimated sky noise
self.noise = None
# Relevant for when estimation method is 'photutils'
self.phot_sky = None
self.phot_rms = None
# Relevant for when estimation method is 'pts'
self.apertures_frame = None
self.apertures_mean_frame = None
self.apertures_noise_frame = None
# -----------------------------------------------------------------
@classmethod
def from_arguments(cls, arguments):
"""
This function ...
:param arguments:
:return:
"""
# Create a new SkySubtractor instance
if arguments.config is not None: subtractor = cls(arguments.config)
elif arguments.settings is not None: subtractor = cls(arguments.settings)
else: subtractor = cls()
# Return the new instance
return subtractor
# -----------------------------------------------------------------
def run(self, frame, principal_shape, sources_mask, extra_mask=None, saturation_region=None, animation=None):
"""
This function ...
:param frame:
:param principal_shape:
:param sources_mask:
:param extra_mask:
:param saturation_region:
:param animation:
:return:
"""
# 1. Call the setup function
self.setup(frame, principal_shape, sources_mask, extra_mask, saturation_region, animation)
# 2. Create the sky region
self.create_region()
# 3. Create mask
self.create_mask()
# 4. Do an extra sigma-clipping step on the data
if self.config.sigma_clip_mask: self.sigma_clip()
# 5. Estimate the sky (and sky noise)
self.estimate()
# 6. Subtract the sky
self.subtract()
# 7. Set the frame to zero outside of the principal galaxy
if self.config.set_zero_outside: self.set_zero_outside()
# 8. Eliminate negative values from the frame, set them to zero
if self.config.eliminate_negatives: self.eliminate_negatives()
# -----------------------------------------------------------------
def clear(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Clearing the sky subtractor ...")
# Set default values for all attributes
self.frame = None
self.sources_mask = None
self.extra_mask = None
self.principal_shape = None
self.saturation_region = None
self.animation = None
self.mask = None
self.sky = None
self.noise = None
self.phot_sky = None
self.phot_rms = None
self.apertures_frame = None
self.apertures_mean_frame = None
self.apertures_noise_frame = None
# -----------------------------------------------------------------
def setup(self, frame, principal_shape, sources_mask, extra_mask=None, saturation_region=None, animation=None):
"""
This function ...
:param frame:
:param principal_shape:
:param sources_mask:
:param extra_mask:
:param saturation_region:
:param animation:
:return:
"""
# Call the setup function of the base class
super(SkySubtractor, self).setup()
# Make a local reference to the image frame
self.frame = frame
# Make a reference to the principal shape
self.principal_shape = principal_shape
# Set the masks
self.sources_mask = sources_mask
self.extra_mask = extra_mask
# Set the saturation_region
self.saturation_region = saturation_region
# Make a reference to the animation
self.animation = animation
# -----------------------------------------------------------------
def create_region(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Creating the sky region ...")
# If the sky region has to be loaded from file
if self.config.sky_region is not None:
sky_region = SkyRegion.from_file(self.config.sky_region)
self.region = sky_region.to_pixel(self.frame.wcs)
# If no region file is given by the user, create an annulus from the principal ellipse
else:
# Create the sky annulus
annulus_outer_factor = self.config.mask.annulus_outer_factor
annulus_inner_factor = self.config.mask.annulus_inner_factor
inner_shape = self.principal_shape * annulus_inner_factor
outer_shape = self.principal_shape * annulus_outer_factor
# Create the annulus
annulus = Composite(outer_shape, inner_shape)
# Create the sky region consisting of only the annulus
self.region = Region()
self.region.append(annulus)
# -----------------------------------------------------------------
def create_mask(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Creating the sky mask ...")
# Create a mask from the pixels outside of the sky region
outside_mask = self.region.to_mask(self.frame.xsize, self.frame.ysize).inverse()
# Create a mask from the principal shape
principal_mask = self.principal_shape.to_mask(self.frame.xsize, self.frame.ysize)
#plotting.plot_mask(outside_mask, title="outside mask")
#plotting.plot_mask(principal_mask, title="principal mask")
#plotting.plot_mask(self.sources_mask, title="sources mask")
# Set the mask, make a copy of the input mask initially
self.mask = self.sources_mask + outside_mask + principal_mask
# Add the extra mask (if specified)
if self.extra_mask is not None: self.mask += self.extra_mask
# Check whether saturation contours are defined
if self.saturation_region is not None:
# Expand all contours
expanded_region = self.saturation_region * 1.5
# Create the saturation mask
saturation_mask = expanded_region.to_mask(self.frame.xsize, self.frame.ysize)
self.mask += saturation_mask
# -----------------------------------------------------------------
def sigma_clip(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Performing sigma-clipping on the pixel values ...")
### TEMPORARY: WRITE OUT MASK BEFORE CLIPPING
# Create a frame where the objects are masked
#frame = copy.deepcopy(self.frame)
#frame[self.mask] = float(self.config.writing.mask_value)
# Save the masked frame
#frame.save("masked_sky_frame_notclipped.fits")
###
# Create the sigma-clipped mask
self.mask = statistics.sigma_clip_mask(self.frame, self.config.sigma_clipping.sigma_level, self.mask)
# -----------------------------------------------------------------
def estimate(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Estimating the sky ...")
# Estimate the sky by taking the mean value of all pixels that are not masked
if self.config.estimation.method == "mean": self.estimate_sky_mean()
# Estimate the sky by taking the median value of all pixels that are not masked
elif self.config.estimation.method == "median": self.estimate_sky_median()
# The sky should be estimated by fitting a polynomial function to the pixels
elif self.config.estimation.method == "polynomial": self.estimate_sky_polynomial()
# Use photutils to estimate the sky and sky noise
elif self.config.estimation.method == "photutils": self.estimate_sky_photutils()
# Use our own method to estimate the sky and sky noise
elif self.config.estimation.method == "pts": self.estimate_sky_pts()
# Unkown sky estimation method
else: raise ValueError("Unknown sky estimation method")
# -----------------------------------------------------------------
def estimate_sky_mean(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Estimating the sky by calculating the mean value of all non-masked pixels ...")
# Create a frame filled with the mean value
self.sky = self.mean
# -----------------------------------------------------------------
def estimate_sky_median(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Estimating the sky by calculating the median value of all non-masked pixels ...")
# Create a frame filled with the median value
self.sky = self.median
# -----------------------------------------------------------------
def estimate_sky_polynomial(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Estimating the sky by fitting a polynomial function to all non-masked pixels ...")
polynomial = fitting.fit_polynomial(self.frame, 3, mask=self.mask)
# Evaluate the polynomial
data = fitting.evaluate_model(polynomial, 0, self.frame.xsize, 0, self.frame.ysize)
#plotting.plot_box(data, title="estimated sky")
# Create sky map
# data, wcs=None, name=None, description=None, unit=None, zero_point=None, filter=None, sky_subtracted=False, fwhm=None
self.sky = Frame(data,
wcs=self.frame.wcs,
name="sky",
description="estimated sky",
unit=self.frame.unit,
zero_point=self.frame.zero_point,
filter=self.frame.filter,
sky_subtracted=False,
fwhm=self.frame.fwhm)
# -----------------------------------------------------------------
def estimate_sky_photutils(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Estimating the sky and sky noise by using photutils ...")
bkg = Background(self.frame, (50, 50), filter_shape=(3, 3), filter_threshold=None, mask=self.mask,
method="sextractor", backfunc=None, interp_order=3, sigclip_sigma=3.0, sigclip_iters=10)
# Masked background
masked_background = np.ma.masked_array(bkg.background, mask=self.mask)
#plotting.plot_box(masked_background, title="masked background")
mean_sky = np.ma.mean(masked_background)
median_sky = np.median(masked_background.compressed())
# data, wcs=None, name=None, description=None, unit=None, zero_point=None, filter=None, sky_subtracted=False, fwhm=None
self.phot_sky = Frame(bkg.background,
wcs=self.frame.wcs,
name="phot_sky",
description="photutils background",
unit=self.frame.unit,
zero_point=self.frame.zero_point,
filter=self.frame.filter,
sky_subtracted=False,
fwhm=self.frame.fwhm)
# data, wcs=None, name=None, description=None, unit=None, zero_point=None, filter=None, sky_subtracted=False, fwhm=None
self.phot_rms = Frame(bkg.background_rms,
wcs=self.frame.wcs,
name="phot_rms",
description="photutils rms",
unit=self.frame.unit,
zero_point=self.frame.zero_point,
filter=self.frame.filter,
sky_subtracted=False,
fwhm=self.frame.fwhm)
# Set sky level
self.sky = median_sky
# -----------------------------------------------------------------
def estimate_sky_pts(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Estimating the sky and sky noise by using or own procedures ...")
# Check whether the FWHM is defined for the frame
if self.frame.fwhm is None: raise RuntimeError("The FWHM of the frame is not defined: sky apertures cannot be generated")
# Determine the aperture radius
aperture_radius = self.determine_aperture_radius()
# Determine the number of apertures to use
napertures = self.determine_number_of_apertures(aperture_radius)
# Generate the apertures
aperture_centers, aperture_means, aperture_stddevs = self.generate_apertures(aperture_radius, napertures)
# Remove outliers
aperture_centers, aperture_means, aperture_stddevs = self.remove_aperture_outliers(aperture_centers, aperture_means, aperture_stddevs)
# Calculate the large-scale variation level
large_scale_variations_error = aperture_means.std()
# Calculate the mean pixel-by-pixel noise over all apertures
pixel_to_pixel_noise = np.mean(aperture_stddevs)
# Determine the median sky level
self.sky = np.median(aperture_means)
# Determine the noise by quadratically adding the large scale variation and the mean pixel-by-pixel noise
self.noise = np.sqrt(large_scale_variations_error**2 + pixel_to_pixel_noise**2)
# Debugging
log.debug("The estimated sky level is " + str(self.sky))
log.debug("The estimated sky noise level is " + str(self.noise))
# Create aperture frames
self.create_aperture_frames(aperture_centers, aperture_means, aperture_stddevs, aperture_radius)
# Finishing step
if self.config.estimation.finishing_step is None: pass
elif self.config.estimation.finishing_step == "polynomial": self.fit_polynomial_to_apertures(aperture_centers, aperture_means)
elif self.config.estimation.finishing_step == "interpolation": self.interpolate_apertures(aperture_centers, aperture_means)
else: raise ValueError("Invalid finishing step")
#self.plot_interpolated(aperture_centers, aperture_means)
#self.try_to_interpolate_smart(aperture_centers, aperture_means)
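# The noise above is a quadrature (root-sum-square) combination. Quick worked example with illustrative
# numbers: a large-scale variation of 0.03 and a mean pixel-to-pixel noise of 0.04 (in frame units) give
# noise = sqrt(0.03**2 + 0.04**2) = 0.05.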
# -----------------------------------------------------------------
def determine_aperture_radius(self):
"""
This function ...
:return:
"""
# Determine the radius for the sky apertures
fwhm_pix = self.frame.fwhm_pix
radius = 4.0 * fwhm_pix
# Debugging
log.debug("Using sky apertures with a radius of " + str(radius) + " pixels")
# Return the aperture radius
return radius
# -----------------------------------------------------------------
def determine_number_of_apertures(self, radius):
"""
This function ...
:param radius:
:return:
"""
npixels = np.sum(self.mask.inverse())
# Assuming optimal hexagonal packing, get an estimate of the maximum number of circles of given radius
# that can fit in the area covered by the pixels that are not masked. This is obviously a significant overestimation
# especially in the case where the radius becomes of the same order of magnitude as the radius of the
# galaxy annulus (the hexagonal packing assumes a rectangular area or at least rectangular-like edges)
# With perfect hexagonal packing, the area of the rectangle that will be covered by the circles is π/(2√3),
# which is approximately equal to 0.907
# See: https://www.quora.com/How-many-3-75-inch-circles-will-fit-inside-a-17-inch-square
coverable_area = 0.907 * npixels
circle_area = np.pi * radius ** 2
optimal_number_of_apertures = coverable_area / circle_area
# Debugging
log.debug("The upper limit to the number of apertures that fit in the part of the frame that is not masked "
"(assuming hexagonal packing) is " + str(optimal_number_of_apertures))
# Determine the number of apertures that are going to be used, take a third of the upper limit
napertures = int(optimal_number_of_apertures / 3.)
# Debugging
log.debug("A total of " + str(napertures) + " apertures are going to be used to estimate the sky ...")
# Return the number of apertures
return napertures
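# Worked example of the estimate above (illustrative numbers): with npixels = 100000 unmasked pixels and
# an aperture radius of 10 pixels, coverable_area = 0.907 * 100000 = 90700 and circle_area = pi * 100 ~= 314.16,
# giving an upper limit of ~288 apertures, of which about a third (96) would actually be used.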
# -----------------------------------------------------------------
def generate_apertures(self, radius, napertures):
"""
This function ...
:param radius:
:param napertures:
:return:
"""
circle_area = np.pi * radius ** 2
# Get arrays of the coordinates of all pixels that are not masked
pixels_y, pixels_x = np.where(self.mask.inverse())
# Get the number of pixels that are not masked (also the area of the frame not masked)
npixels = pixels_x.size
# Create a mask that tags all pixels that have been covered by one of the apertures
apertures_mask = Mask.empty_like(self.frame)
# Counter to keep track of the number of 'successful' apertures that have been used
current_napertures = 0
# Initialize lists to contain the mean sky levels and noise levels in each of the apertures
aperture_centers = []
aperture_means = []
aperture_stddevs = []
# Keep drawing random aperture positions until the requested number of apertures has been placed
while True:
# Draw a random pixel index
index = np.random.randint(npixels)
# Get the x and y coordinate of the pixel
x = pixels_x[index]
y = pixels_y[index]
# Create a coordinate for the center of the aperture
center = Coordinate(x, y)
# Create a circular aperture
circle = Circle(center, radius)
# Create a Source from the frame
source = Source.from_shape(self.frame, circle, 1.3)
# Get a mask of the pixels that overlap with the sky mask
sky_mask_cutout = self.mask[source.y_slice, source.x_slice]
overlapping = sky_mask_cutout * source.mask
# Calculate the overlap fraction with the sky mask
number_of_overlapping_pixels = np.sum(overlapping)
overlap_fraction = number_of_overlapping_pixels / circle_area
# If the overlap fraction is larger than 50% for this aperture, skip it
if overlap_fraction >= 0.5:
log.debug(
"For this aperture, an overlap fraction of more than 50% was found with the sky mask, skipping ...")
continue
# Get a mask of the pixels that overlap with the apertures mask
apertures_mask_cutout = apertures_mask[source.y_slice, source.x_slice]
overlapping = apertures_mask_cutout * source.mask
# Calculate the overlap fraction with the apertures mask
number_of_overlapping_pixels = np.sum(overlapping)
overlap_fraction = number_of_overlapping_pixels / circle_area
# If the overlap fraction is larger than 10% for this aperture, skip it
if overlap_fraction >= 0.1:
log.debug(
"For this aperture, an overlap fraction of more than 10% was found with other apertures, skipping ...")
continue
# Add the aperture area to the mask
apertures_mask[source.y_slice, source.x_slice] += source.mask
# Debugging
log.debug("Placed aperture " + str(current_napertures+1) + " of " + str(napertures) + " ({0:.2f}%)".format((current_napertures+1)/napertures*100.))
if self.animation is not None:
plt.figure()
plt.imshow(apertures_mask, origin="lower")
plt.title("Aperture mask")
buf = io.BytesIO()
plt.savefig(buf, format='png')
buf.seek(0)
im = imageio.imread(buf)
buf.close()
self.animation.add_frame(im)
# Calculate the mean sky value in this aperture
masked_array_cutout = np.ma.MaskedArray(source.cutout, mask=sky_mask_cutout + source.background_mask)
# plotting.plot_box(masked_array_cutout)
aperture_mean = np.ma.mean(masked_array_cutout)
#aperture_median = np.ma.median(masked_array_cutout)
# aperture_median2 = np.median(masked_array_cutout.compressed()) # same result, but unnecessary compressed step
aperture_stddev = np.std(masked_array_cutout)
# print("aperture mean:", aperture_mean)
# print("aperture median:", aperture_median, aperture_median2)
# print("aperture stddev:", aperture_std)
aperture_centers.append(center)
aperture_means.append(aperture_mean)
aperture_stddevs.append(aperture_stddev)
# Another succesful aperture
current_napertures += 1
# Stop when we have reached the desired number of apertures
if current_napertures == napertures: break
# Create Numpy arrays from the aperture means and standard deviations
aperture_means = np.array(aperture_means)
aperture_stddevs = | np.array(aperture_stddevs) | numpy.array |
# -*- coding: utf-8 -*-
"""Models module."""
import re
from abc import ABC, abstractmethod
from copy import copy
from typing import Any, Tuple, Iterable, Optional
from inspect import signature
import numpy as np
import pandas as pd
import sklearn
from scipy.sparse import issparse
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.colors as mcolors
import matplotlib.pyplot as plt
import anndata
from cellrank.tools._utils import save_fig
from cellrank.utils._utils import _minmax
from cellrank.tools._lineage import Lineage
from cellrank.tools._constants import LinKey
_dup_spaces = re.compile(r" +")
class Model(ABC):
"""
Base class for other model classes.
Params
------
adata : :class:`anndata.AnnData`
Annotated data object.
model
Underlying model.
weight_name
Name of the weight argument for :paramref:`model`.
"""
def __init__(
self, adata: anndata.AnnData, model: Any, weight_name: Optional[str] = None
):
self._adata = adata
self._model = model
self.weight_name = weight_name
self._x_all = None
self._y_all = None
self._w_all = None
self._x = None
self._y = None
self._w = None
self._x_test = None
self._y_test = None
self._x_hat = None
self._y_hat = None
self._conf_int = None
self._dtype = np.float32
@property
def adata(self) -> anndata.AnnData:
"""Annotated data object."""
return self._adata
@property
def model(self) -> Any:
"""Underlying model."""
return self._model
@property
def x_all(self) -> np.ndarray:
"""Original independent variables."""
return self._x_all
@property
def y_all(self) -> np.ndarray:
"""Original dependent variables."""
return self._y_all
@property
def w_all(self) -> np.ndarray:
"""Original weights."""
return self._w_all
@property
def x(self) -> np.ndarray:
"""Independent variables used for model fitting."""
return self._x
@property
def y(self) -> np.ndarray:
"""Dependent variables used for model fitting."""
return self._y
@property
def w(self) -> np.ndarray:
"""Weights of independent variables used for model fitting."""
return self._w
@property
def x_test(self) -> np.ndarray:
"""Independent variables used for prediction."""
return self._x_test
@property
def y_test(self) -> np.ndarray:
"""Predicted values."""
return self._y_test
@property
def x_hat(self) -> np.ndarray:
"""Independent variables used when calculating default confidence interval."""
return self._x_hat
@property
def y_hat(self) -> np.ndarray:
"""Dependent variables used when calculating default confidence interval."""
return self._y_hat
@property
def conf_int(self) -> np.ndarray:
"""Confidence interval."""
return self._conf_int
@abstractmethod
def __copy__(self) -> "Model":
pass
def prepare(
self,
gene: str,
lineage_name: str,
data_key: str = "X",
final: bool = True,
time_key: str = "latent_time",
start_lineage: Optional[str] = None,
end_lineage: Optional[str] = None,
threshold: Optional[float] = None,
weight_threshold: float = 0.02,
weight_scale: float = 1,
filter_data: float = False,
n_test_points: int = 200,
) -> "Model":
"""
Prepare the model to be ready for fitting.
Params
------
gene
Gene in :paramref:`adata` `.var_names`.
lineage_name
Name of a lineage in :paramref:`adata` `.obsm`:paramref:`lineage_key`.
data_key
Key in :attr:`paramref.adata` `.layers` or `'X'` for :paramref:`adata` `.X`
final
Whether to consider cells going to final states or vice versa.
time_key
Key in :paramref:`adata` `.obs` where the pseudotime is stored.
start_lineage
Lineage from which to select cells with lowest pseudotime as starting points.
If specified, the trends start at the earliest pseudotime within that lineage,
otherwise they start from time `0`.
end_lineage
Lineage from which to select cells with highest pseudotime as endpoints.
If specified, the trends end at the latest pseudotime within that lineage,
otherwise, it is determined automatically.
threshold
Consider only cells with :paramref:`weights` > :paramref:`threshold` when estimating the testing endpoint.
If `None`, use median of :paramref:`w`.
weight_threshold
Set all weights below this to :paramref:`weight_scale` * :paramref:`weight_threshold`.
weight_scale
Weight threshold scale, see :paramref:`weight_threshold`.
filter_data
Use only testing points for fitting.
n_test_points
Number of test points.
If `None`, use the original points based on :paramref:`threshold`.
Returns
-------
None
Nothing, but updates the following fields:
- :paramref:`x`
- :paramref:`y`
- :paramref:`w`
- :paramref:`x_test`
"""
if data_key not in ["X", "obs"] + list(self.adata.layers.keys()):
raise KeyError(
f"Data key must be a key of `adata.layers`: `{list(self.adata.layers.keys())}`, '`obs`' or `'X'`."
)
if time_key not in self.adata.obs:
raise KeyError(f"Time key `{time_key!r}` not found in `adata.obs`.")
if data_key != "obs":
if gene not in self.adata.var_names:
raise KeyError(f"Gene `{gene!r}` not found in `adata.var_names`.")
else:
if gene not in self.adata.obs:
raise KeyError(f"Unable to find key `{gene!r}` in `adata.obs`.")
lineage_key = str(LinKey.FORWARD if final else LinKey.BACKWARD)
if lineage_key not in self.adata.obsm:
raise KeyError(f"Lineage key `{lineage_key!r}` not found in `adata.obsm`.")
if not isinstance(self.adata.obsm[lineage_key], Lineage):
raise TypeError(
f"Expected `adata.obsm[{lineage_key!r}]` to be of type `cellrank.tl.Lineage`, "
f"found `{type(self.adata.obsm[lineage_key]).__name__}`."
)
if lineage_name is not None:
_ = self.adata.obsm[lineage_key][lineage_name]
if start_lineage is not None:
if start_lineage not in self.adata.obsm[lineage_key].names:
raise KeyError(
f"Start lineage `{start_lineage!r}` not found in `adata.obsm[{lineage_key!r}].names`."
)
if end_lineage is not None:
if end_lineage not in self.adata.obsm[lineage_key].names:
raise KeyError(
f"End lineage `{end_lineage!r}` not found in `adata.obsm[{lineage_key!r}].names`."
)
x = np.array(self.adata.obs[time_key]).astype(np.float64)
gene_ix = np.where(self.adata.var_names == gene)[0]
if data_key == "X":
y = self.adata.X[:, gene_ix]
elif data_key == "obs":
y = self.adata.obs[gene].values
elif data_key in self.adata.layers:
y = self.adata.layers[data_key][:, gene_ix]
else:
raise NotImplementedError(
f"Data key `{data_key!r}` is not yet implemented."
)
if issparse(y):
y = np.asarray(y.todense())
y = np.squeeze(y).astype(np.float64)
if lineage_name is not None:
w = (
np.array(self.adata.obsm[lineage_key][lineage_name])
.astype(self._dtype)
.squeeze()
)
w[w < weight_threshold] = np.clip(weight_threshold * weight_scale, 0, 1)
else:
w = np.ones_like(x)
self._x_all, self._y_all, self._w_all = x[:], y[:], w[:]
x, ixs = np.unique(x, return_index=True)
y = y[ixs]
w = w[ixs]
ixs = np.argsort(x)
x, y, w = x[ixs], y[ixs], w[ixs]
if start_lineage is None or (start_lineage == lineage_name):
val_start = np.min(self.adata.obs[time_key])
else:
from_key = "_".join(lineage_key.split("_")[1:])
val_start = np.nanmin(
self.adata.obs[time_key][self.adata.obs[from_key] == start_lineage]
)
if end_lineage is None or (end_lineage == lineage_name):
if threshold is None:
threshold = np.nanmedian(w)
w_test = w[w > threshold]
tmp = np.convolve(w_test, np.ones(8) / 8, mode="same")
val_end = x[w > threshold][np.nanargmax(tmp)]
else:
to_key = "_".join(lineage_key.split("_")[1:])
val_end = np.nanmax(
self.adata.obs[time_key][self.adata.obs[to_key] == end_lineage]
)
if val_start > val_end:
val_start, val_end = val_end, val_start
x_test = (
np.linspace(val_start, val_end, n_test_points)
if n_test_points is not None
else x[(x >= val_start) & (x <= val_end)]
)
if filter_data:
fil = (x >= val_start) & (x <= val_end)
x, y, w = x[fil], y[fil], w[fil]
self._x, self._y, self._w = (
self._convert(x[:]),
self._convert(y[:]),
self._convert(w[:]).squeeze(-1),
)
self._x_test = self._convert(x_test[:])
return self
def _convert(self, value: np.ndarray) -> np.ndarray:
was_1d = value.ndim == 1
        value = np.atleast_2d(value)
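        # The rest of this helper is cut off in the source. The lines below are a
        # plausible sketch only (an assumption, not the original code): restore the
        # column orientation for 1-D inputs -- so that calls such as
        # `self._convert(w[:]).squeeze(-1)` above receive an (n, 1) array -- and
        # cast to the model dtype.
        if was_1d:
            value = np.swapaxes(value, 0, 1)
        return value.astype(self._dtype)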
"""
Author: <NAME>
Email: <EMAIL>
This is a collection of plotting functions for the input/output data
from a 3DLIM run.
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import pandas as pd
import netCDF4
from scipy.optimize import curve_fit
from matplotlib import colors
from matplotlib import patches
from matplotlib import ticker
# Some plot properties to make them a bit nicer.
plt.ion()
#plt.rcParams['font.family'] = 'serif'
fontsize = 12
ms = 2
lw = 5
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Scale the tableau20 RGBs to numbers between (0,1) since this is how mpl accepts them.
for i in range(len(tableau20)):
r, g, b = tableau20[i]
tableau20[i] = (r / 255., g / 255., b / 255.)
class LimPlots:
"""
Class object to store data and plotting routines for a 3DLIM run.
"""
def __init__(self, ncpath, combine_repeat_runs=True):
"""
        Just initialize with the netCDF file. Will assume the lim and dat files
        are in the same folder.
ncpath: Path to the NetCDF file.
combine_repeat_runs: Combine the NERODS3 arrays from multiple repeat
runs together for improved statistics. This assumes the following
naming convention: ncfile, ncfile2, ncfile3, ...
"""
# Save the repeat runs flags for later.
self.combine_repeat_runs = combine_repeat_runs
self.ncpath = ncpath
# Just a default file for testing.
if ncpath == 'test':
ncpath = 'colprobe-z1-001e.nc'
# Load in netcdf file.
self.nc = netCDF4.Dataset(ncpath)
# Load in the lim file.
limpath = ncpath[:-2] + 'lim'
with open(limpath) as f:
self.lim = f.read()
# load in the dat file.
datpath = ncpath[:-2] + 'dat'
with open(datpath) as f:
self.dat = f.read()
# Save file name.
self.file = ncpath.split('/')[-1][:-3]
# Save case name.
self.case = ''
for b in self.nc.variables['TITLE'][:].data:
self.case = self.case + b.decode('UTF-8')
def __repr__(self):
message = 'LimPlots Object\n' + \
' Case: ' + self.case + '\n' + \
' File: ' + self.file + '\n'
return message
def summary(self):
# Output dictionary to print out results easier.
output = dict()
# Case info.
output['Case'] = self.case
output['File'] = self.file
# Time for run in hours.
time = int(self.dat.split('TOTAL CPU TIME USED (S)')[1].split('\n')[0])
output['Time'] = str(time) + 's (' + format(time/3600, '.2f') + ' hours)'
# Number of impurities followed.
num = int(self.dat.split('NO OF IMPURITY IONS TO FOLLOW')[1].split('\n')[0])
output['Ions Followed'] = "{:,}".format(num)
# Find longest output for formatting.
pad = 0
for val in output.values():
if len(str(val)) > pad:
pad = len(str(val))
# Printing commands.
num_stars = 2 + 15 + 2 + pad
print("\n" + "*" * num_stars)
for key, val in output.items():
print("* {:15}{:<{pad}} *".format(key, val, pad=pad))
print("*" * num_stars)
def get_dep_array(self, num_runs=399):
"""
Load the deposition arrays for the collector probes.
To-Do
- Add option to combine the arrays of multiple repeat runs.
"""
# Only load it once. Keep track if it's already been loaded by trying
# to see if it's been defined yet.
try:
self.dep_arr
# Not defined, so load it.
except AttributeError:
if self.combine_repeat_runs:
# Try and get the dep_arr from the base case. If it doesn't exist,
# that means nothing landed on the probe and 3DLIM won't save
                # an array of all zeros apparently. So just create the dep_arr
# of all zeros.
try:
dep_arr = np.array(self.nc.variables['NERODS3'][0] * -1)
except:
dep_arr = np.zeros((6, 2*self.nc.variables['MAXNPS'][:]+1, self.nc.variables['MAXOS'][:]))
print(" No NERODS3.")
# Add on contributions from repeat runs.
for i in range(1, num_runs):
try:
ncpath_add = self.ncpath.split('.nc')[0] + str(i) + '.nc'
#print('Looking for {}...'.format(ncpath_add))
nc = netCDF4.Dataset(ncpath_add)
print("Found additional run: {}".format(ncpath_add))
try:
dep_arr = dep_arr + np.array(nc.variables['NERODS3'][0] * -1)
except KeyError:
print(" No NERODS3.")
except:
pass
else:
# Create the deposition array for the initial file.
dep_arr = np.array(self.nc.variables['NERODS3'][0] * -1)
# Define dep_arr so next time you won't have to choose all the file
# locations.
self.dep_arr = dep_arr
return self.dep_arr
def centerline(self, log=False, fit_exp=False, plotnum=0, show_plot=True):
"""
Plot the ITF and OTF deposition along the centerlines on the same plot.
log: Option to make y axis a log scale.
fit_exp: Do an exponential fit onto the data and get the lambdas.
To-Do
- Add option so ITF/OTF is only over, say, first 5 cm.
"""
#The deposition array.
dep_arr = self.get_dep_array()
# Location of each P bin, and its width.
ps = np.array(self.nc.variables['PS'][:].data)
pwids = np.array(self.nc.variables['PWIDS'][:].data)
# Array of poloidal locations (i.e. the center of each P bin).
pol_locs = ps - pwids/2.0
# Distance cell centers along surface (i.e. the radial locations).
rad_locs = np.array(self.nc.variables['ODOUTS'][:].data)
# Get the centerline index (or closest to it).
cline = np.abs(pol_locs).min()
# Index the deposition array at the centerline for plotting.
itf_x = rad_locs[np.where(rad_locs > 0.0)[0]]
itf_y = dep_arr[np.where(pol_locs == cline)[0], np.where(rad_locs > 0.0)[0]]
otf_x = rad_locs[np.where(rad_locs < 0.0)[0]] * -1
otf_y = dep_arr[np.where(pol_locs == cline)[0], np.where(rad_locs < 0.0)[0]]
# Plotting commands.
if plotnum == 0:
if show_plot:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
ax = self.master_fig.axes[plotnum-1]
# Option for a log axis.
if show_plot:
if log:
ax.semilogy(itf_x*100, itf_y, '-', label='ITF', ms=ms, color=tableau20[6])
ax.semilogy(otf_x*100, otf_y, '-', label='OTF', ms=ms, color=tableau20[8])
else:
ax.plot(itf_x*100, itf_y, '-', label='ITF', ms=ms, color=tableau20[6])
ax.plot(otf_x*100, otf_y, '-', label='OTF', ms=ms, color=tableau20[8])
# The tips of the probes can have spuriously high data points, so set the
        # max for the ylim to just the second highest number in the datasets.
ymax = sorted(np.concatenate((otf_y, itf_y)))[-2]
ax.legend(fontsize=fontsize)
ax.set_xlabel('Distance along probe (cm)', fontsize=fontsize)
ax.set_ylabel('Deposition (arbitrary units)', fontsize=fontsize)
#ax.set_xlim([0, 10])
#ax.set_ylim([0, ymax])
# Option to perform an exponential fit to the data.
if fit_exp:
def exp_fit(x, a, b):
return a * np.exp(-b * x)
popt_itf, pcov_itf = curve_fit(exp_fit, itf_x, itf_y, maxfev=5000)
popt_otf, pcov_otf = curve_fit(exp_fit, otf_x, otf_y, maxfev=5000)
fitx = np.linspace(0, 0.1, 100)
fity_itf = exp_fit(fitx, *popt_itf)
fity_otf = exp_fit(fitx, *popt_otf)
if show_plot:
if log:
ax.semilogy(fitx*100, fity_itf, '--', ms=ms, color=tableau20[6])
ax.semilogy(fitx*100, fity_otf, '--', ms=ms, color=tableau20[8])
else:
ax.plot(fitx*100, fity_itf, '--', ms=ms, color=tableau20[6])
ax.plot(fitx*100, fity_otf, '--', ms=ms, color=tableau20[8])
print("Lambdas")
print(" ITF = {:.2f}".format(1/popt_itf[1]*100))
print(" OTF = {:.2f}".format(1/popt_otf[1]*100))
        if plotnum == 0:
if show_plot:
fig.tight_layout()
fig.show()
print("Center ITF/OTF: {:.2f}".format(itf_y.sum()/otf_y.sum()))
return {'itf_x':itf_x, 'itf_y':itf_y, 'otf_x':otf_x, 'otf_y':otf_y}
def deposition_contour(self, side, probe_width=0.015, rad_cutoff=0.05,
plotnum=0, vmax=None, print_ratio=True):
"""
Plot the 2D tungsten distribution across the face.
side: Either 'ITF' or 'OTF'.
probe_width: The half-width of the collector probe (the variable CPCO).
A = 0.015, B = 0.005, C = 0.0025
rad_cutoff: Only plot data from the tip down to rad_cutoff. Useful
if we want to compare to LAMS since those scans only go
down a certain length of the probe.
*** To-Do ***
- Instead of entering the width, pull out CPCO(?) from the netcdf file.
Need to figure out the points being deposited outside the expected
probe width first though.
- Print out the ITF/OTF ratio from this analysis.
"""
#The deposition array.
dep_arr = self.get_dep_array()
# Location of each P bin, and its width. Currently they all have the same width,
# but it may end up such that there are custom widths so we leave it like this.
ps = np.array(self.nc.variables['PS'][:].data)
pwids = np.array(self.nc.variables['PWIDS'][:].data)
# Array of poloidal locations (i.e. the center of each P bin).
pol_locs = ps - pwids/2.0
# Distance cell centers along surface (i.e. the radial locations).
rad_locs = np.array(self.nc.variables['ODOUTS'][:].data)
# Remove data beyond rad_cutoff.
idx = np.where(np.abs(rad_locs)<rad_cutoff)[0]
rad_locs = rad_locs[idx]
dep_arr = dep_arr[:, idx]
# Seems a junk number can sneak into PS and PWIDS. Clean that up.
idx = np.where(ps < 9999)[0]
pol_locs = pol_locs[idx]
dep_arr = dep_arr[idx]
# Get only positive values of rad_locs for ITF...
idx = np.where(rad_locs > 0.0)[0]
X_itf, Y_itf = np.meshgrid(rad_locs[idx], pol_locs)
Z_itf = dep_arr[:, idx]
# ... negative for OTF.
idx = np.where(rad_locs < 0.0)[0]
X_otf, Y_otf = np.meshgrid(np.abs(rad_locs[idx][::-1]), pol_locs)
Z_otf = dep_arr[:, idx][:, ::-1]
# Make the levels for the contour plot out of whichever side has the max deposition.
        if vmax is None:
if Z_itf.max() > Z_otf.max():
levels = np.linspace(0, Z_itf.max(), 15)
else:
levels = np.linspace(0, Z_otf.max(), 15)
else:
levels = np.linspace(0, vmax, 15)
# Plotting commands.
if side == 'ITF':
X = X_itf; Y = Y_itf; Z = Z_itf
else:
X = X_otf; Y = Y_otf; Z = Z_otf
if plotnum == 0:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
ax = self.master_fig.axes[plotnum-1]
ax.contourf(X*100, Y*100, Z, levels=levels, cmap='Reds')
ax.set_xlabel('Distance along probe (cm)', fontsize=fontsize)
ax.set_ylabel('Z location (cm)', fontsize=fontsize)
ax.set_ylim([-probe_width*100, probe_width*100])
props = dict(facecolor='white')
ax.text(0.75, 0.85, side, bbox=props, fontsize=fontsize*1.5, transform=ax.transAxes)
if plotnum == 0:
fig.tight_layout()
fig.show()
if print_ratio:
print('Total ITF/OTF (0-{} cm): {:.2f}'.format(rad_cutoff*100, Z_itf.sum()/Z_otf.sum()))
def avg_pol_profiles(self, probe_width=0.015, rad_cutoff=0.5, plotnum=0):
"""
Plot the average poloidal profiles for each side. Mainly to see if
deposition peaks on the edges.
probe_width: The half-width of the collector probe (the variable CPCO).
A = 0.015, B = 0.005, C = 0.0025
rad_cutoff: Only plot data from the tip down to rad_cutoff. Useful
if we want to compare to LAMS since those scans only go
down a certain length of the probe.
"""
# Code copied from above function, deposition_contour. See for comments.
dep_arr = np.array(self.nc.variables['NERODS3'][0] * -1)
ps = np.array(self.nc.variables['PS'][:].data)
pwids = np.array(self.nc.variables['PWIDS'][:].data)
pol_locs = ps - pwids/2.0
dep_arr = dep_arr[:-1, :]
pol_locs = pol_locs[:-1]
rad_locs = np.array(self.nc.variables['ODOUTS'][:].data)
idx = np.where(np.abs(rad_locs)<rad_cutoff)[0]
rad_locs = rad_locs[idx]
dep_arr = dep_arr[:, idx]
idx = np.where(rad_locs > 0.0)[0]
X_itf, Y_itf = np.meshgrid(rad_locs[idx], pol_locs)
Z_itf = dep_arr[:, idx]
idx = np.where(rad_locs < 0.0)[0]
        X_otf, Y_otf = np.meshgrid(np.abs(rad_locs[idx][::-1]), pol_locs)
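        Z_otf = dep_arr[:, idx][:, ::-1]

        # The method is truncated at this point in the source. What follows is a
        # hedged sketch (an assumption, mirroring deposition_contour above): average
        # the deposition along the radial direction to get one poloidal profile per
        # side and plot both against Z location. Variable names are illustrative.
        avg_itf = Z_itf.mean(axis=1)
        avg_otf = Z_otf.mean(axis=1)

        if plotnum == 0:
            fig = plt.figure()
            ax = fig.add_subplot(111)
        else:
            ax = self.master_fig.axes[plotnum - 1]

        ax.plot(pol_locs * 100, avg_itf, '-', label='ITF', color=tableau20[6])
        ax.plot(pol_locs * 100, avg_otf, '-', label='OTF', color=tableau20[8])
        ax.legend(fontsize=fontsize)
        ax.set_xlabel('Z location (cm)', fontsize=fontsize)
        ax.set_ylabel('Average deposition (arbitrary units)', fontsize=fontsize)
        ax.set_xlim([-probe_width * 100, probe_width * 100])

        if plotnum == 0:
            fig.tight_layout()
            fig.show()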
import os
import sys
from difflib import SequenceMatcher
from pyproj import Proj, transform
import numpy as np
import pandas as pd
def similar(a, b):
return SequenceMatcher(None, a, b).ratio()
def extract_loc(t_array, gps_data):
_xy = gps_data[[0, -1]]
pct = ((t_array * 1.0) / np.max(t_array))[:, np.newaxis]
pos = pct * (_xy[1] - _xy[0])[np.newaxis, :] + _xy[0]
return pos
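# Illustrative example (not from the original script): three timestamps spanning
# a straight 100 m walk map to 0 %, 50 % and 100 % of the segment between the
# first and last GPS fix.
#
# >>> extract_loc(np.array([0.0, 5.0, 10.0]), np.array([[0.0, 0.0], [100.0, 0.0]]))
# array([[  0.,   0.],
#        [ 50.,   0.],
#        [100.,   0.]])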
def proc_detection(c_angle, view_extend_dist, f_lpr, f_traj, f_gps):
# get walking trajectory
WGS = Proj(init="epsg:4326")
utm_11 = Proj(init="epsg:26911")
df = pd.read_csv(f_lpr)
df_loc = pd.read_csv(f_gps, names=['lat', 'lon'])
xy = df_loc.apply(lambda r: transform(WGS, utm_11, r['lon'], r['lat']),
axis=1)
df_loc['x'] = xy.apply(lambda x: x[0])
df_loc['y'] = xy.apply(lambda x: x[1])
gps = df_loc[['x', 'y']].values
ts = df['time'].values
pos_intp = extract_loc(ts, gps)
# get camera location for each video frame image
traj = np.apply_along_axis(lambda x: transform(utm_11, WGS, x[0], x[1]),
1, np.unique(pos_intp, axis=0))
traj[:, [0, 1]] = traj[:, [1, 0]]
traj = traj[::-1]
traj = np.append(traj, np.unique(ts)[:, np.newaxis], axis=1)
traj = pd.DataFrame(traj, columns=['lat', 'lon', 't_vid'])
with open(f_traj, 'w') as wrt:
str_l = []
for row in traj.values:
str_l.append("%f,%f" % (row[1], row[0]))
wrt.write("%s" % ';'.join(str_l))
    # get vehicle location for each image
unit = pos_intp[-1] - pos_intp[0]
unit = unit[:, np.newaxis]
unit = unit / np.linalg.norm(unit)
dist = df['dist'].values
dist[dist == -1.0] = np.nan
dist += view_extend_dist
th = np.radians(c_angle)
    rot_M = np.array([[np.cos(th), -np.sin(th)], [np.sin(th), np.cos(th)]])
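    # The function is truncated here in the source. The lines below are a hedged
    # sketch of the remaining step (an assumption, not the original code): rotate
    # the walking-direction unit vector by the camera angle and offset each
    # reading by its measured distance to estimate vehicle positions (in UTM).
    # `view_dir` and `veh_pos` are illustrative names.
    view_dir = rot_M.dot(unit)                # (2, 1) unit vector along the camera view
    veh_pos = pos_intp + (view_dir * dist).T  # (n, 2) estimated vehicle positions
    return veh_pos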
import pandas as pd
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Perceptron
from sklearn.metrics import accuracy_score
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt
class LogisticRegressionGD:
"""Gradient descent-based logistic regression classifier.
Parameters
------------
eta : float
Learning rate (between 0.0 and 1.0)
n_iter : int
Passes over the training dataset.
random_state : int
Random number generator seed for random weight
initialization.
Attributes
-----------
w_ : 1d-array
Weights after training.
b_ : Scalar
Bias unit after fitting.
losses_ : list
        Log loss (cross-entropy) values in each epoch.
"""
def __init__(self, eta=0.01, n_iter=50, random_state=1):
self.eta = eta
self.n_iter = n_iter
self.random_state = random_state
def fit(self, X, y):
""" Fit training data.
Parameters
----------
X : {array-like}, shape = [n_examples, n_features]
Training vectors, where n_examples is the number of examples and
n_features is the number of features.
y : array-like, shape = [n_examples]
Target values.
Returns
-------
self : Instance of LogisticRegressionGD
"""
        rgen = np.random.RandomState(self.random_state)
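        # The fit method is truncated here in the source. Everything below is a
        # hedged sketch of a standard completion (an assumption, not the original
        # code): initialise small random weights, run full-batch gradient descent,
        # and record the log loss per epoch. The helper methods are likewise assumed.
        self.w_ = rgen.normal(loc=0.0, scale=0.01, size=X.shape[1])
        self.b_ = 0.0
        self.losses_ = []

        for _ in range(self.n_iter):
            net_input = self.net_input(X)
            output = self.activation(net_input)
            errors = y - output
            # Gradient step on the mean log loss w.r.t. weights and bias.
            self.w_ += self.eta * X.T.dot(errors) / X.shape[0]
            self.b_ += self.eta * errors.mean()
            loss = (-y.dot(np.log(output)) - (1 - y).dot(np.log(1 - output))) / X.shape[0]
            self.losses_.append(loss)
        return self

    def net_input(self, X):
        """Calculate the net input z = X w + b."""
        return np.dot(X, self.w_) + self.b_

    def activation(self, z):
        """Logistic sigmoid activation, clipped for numerical stability."""
        return 1.0 / (1.0 + np.exp(-np.clip(z, -250, 250)))

    def predict(self, X):
        """Return the class label (0 or 1) using a 0.5 probability threshold."""
        return np.where(self.activation(self.net_input(X)) >= 0.5, 1, 0)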
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras metrics functions."""
import copy
import json
import math
import os
from absl.testing import parameterized
from keras import backend
from keras import combinations
from keras import keras_parameterized
from keras import layers
from keras import metrics
from keras import Model
from keras import testing_utils
from keras.engine import base_layer
from keras.engine import training as training_module
import numpy as np
import tensorflow.compat.v2 as tf
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class KerasSumTest(tf.test.TestCase, parameterized.TestCase):
def test_sum(self):
with self.test_session():
m = metrics.Sum(name='my_sum')
# check config
self.assertEqual(m.name, 'my_sum')
self.assertTrue(m.stateful)
self.assertEqual(m.dtype, tf.float32)
self.assertLen(m.variables, 1)
self.evaluate(tf.compat.v1.variables_initializer(m.variables))
# check initial state
self.assertEqual(self.evaluate(m.total), 0)
# check __call__()
self.assertEqual(self.evaluate(m(100)), 100)
self.assertEqual(self.evaluate(m.total), 100)
# check update_state() and result() + state accumulation + tensor input
update_op = m.update_state(tf.convert_to_tensor([1, 5]))
self.evaluate(update_op)
self.assertAlmostEqual(self.evaluate(m.result()), 106)
self.assertEqual(self.evaluate(m.total), 106) # 100 + 1 + 5
# check reset_state()
m.reset_state()
self.assertEqual(self.evaluate(m.total), 0)
def test_sum_with_sample_weight(self):
m = metrics.Sum(dtype=tf.float64)
self.assertEqual(m.dtype, tf.float64)
self.evaluate(tf.compat.v1.variables_initializer(m.variables))
# check scalar weight
result_t = m(100, sample_weight=0.5)
self.assertEqual(self.evaluate(result_t), 50)
self.assertEqual(self.evaluate(m.total), 50)
# check weights not scalar and weights rank matches values rank
result_t = m([1, 5], sample_weight=[1, 0.2])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 52., 4) # 50 + 1 + 5 * 0.2
self.assertAlmostEqual(self.evaluate(m.total), 52., 4)
# check weights broadcast
result_t = m([1, 2], sample_weight=0.5)
self.assertAlmostEqual(self.evaluate(result_t), 53.5, 1) # 52 + 0.5 + 1
self.assertAlmostEqual(self.evaluate(m.total), 53.5, 1)
# check weights squeeze
result_t = m([1, 5], sample_weight=[[1], [0.2]])
self.assertAlmostEqual(self.evaluate(result_t), 55.5, 1) # 53.5 + 1 + 1
self.assertAlmostEqual(self.evaluate(m.total), 55.5, 1)
# check weights expand
result_t = m([[1], [5]], sample_weight=[1, 0.2])
self.assertAlmostEqual(self.evaluate(result_t), 57.5, 2) # 55.5 + 1 + 1
self.assertAlmostEqual(self.evaluate(m.total), 57.5, 1)
# check values reduced to the dimensions of weight
result_t = m([[[1., 2.], [3., 2.], [0.5, 4.]]], sample_weight=[0.5])
result = np.round(self.evaluate(result_t), decimals=2)
# result = (prev: 57.5) + 0.5 + 1 + 1.5 + 1 + 0.25 + 2
self.assertAlmostEqual(result, 63.75, 2)
self.assertAlmostEqual(self.evaluate(m.total), 63.75, 2)
def test_sum_graph_with_placeholder(self):
with tf.compat.v1.get_default_graph().as_default(), self.cached_session() as sess:
m = metrics.Sum()
v = tf.compat.v1.placeholder(tf.float32)
w = tf.compat.v1.placeholder(tf.float32)
self.evaluate(tf.compat.v1.variables_initializer(m.variables))
# check __call__()
result_t = m(v, sample_weight=w)
result = sess.run(result_t, feed_dict=({v: 100, w: 0.5}))
self.assertEqual(result, 50)
self.assertEqual(self.evaluate(m.total), 50)
# check update_state() and result()
result = sess.run(result_t, feed_dict=({v: [1, 5], w: [1, 0.2]}))
self.assertAlmostEqual(result, 52., 2) # 50 + 1 + 5 * 0.2
self.assertAlmostEqual(self.evaluate(m.total), 52., 2)
def test_save_restore(self):
with self.test_session():
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, 'ckpt')
m = metrics.Sum()
checkpoint = tf.train.Checkpoint(sum=m)
self.evaluate(tf.compat.v1.variables_initializer(m.variables))
# update state
self.evaluate(m(100.))
self.evaluate(m(200.))
# save checkpoint and then add an update
save_path = checkpoint.save(checkpoint_prefix)
self.evaluate(m(1000.))
# restore to the same checkpoint sum object (= 300)
checkpoint.restore(save_path).assert_consumed().run_restore_ops()
self.evaluate(m(300.))
self.assertEqual(600., self.evaluate(m.result()))
# restore to a different checkpoint sum object
restore_sum = metrics.Sum()
restore_checkpoint = tf.train.Checkpoint(sum=restore_sum)
status = restore_checkpoint.restore(save_path)
restore_update = restore_sum(300.)
status.assert_consumed().run_restore_ops()
self.evaluate(restore_update)
self.assertEqual(600., self.evaluate(restore_sum.result()))
class MeanTest(keras_parameterized.TestCase):
# TODO(b/120949004): Re-enable garbage collection check
# @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
@keras_parameterized.run_all_keras_modes
def test_mean(self):
m = metrics.Mean(name='my_mean')
# check config
self.assertEqual(m.name, 'my_mean')
self.assertTrue(m.stateful)
self.assertEqual(m.dtype, tf.float32)
self.assertEqual(len(m.variables), 2)
self.evaluate(tf.compat.v1.variables_initializer(m.variables))
# check initial state
self.assertEqual(self.evaluate(m.total), 0)
self.assertEqual(self.evaluate(m.count), 0)
# check __call__()
self.assertEqual(self.evaluate(m(100)), 100)
self.assertEqual(self.evaluate(m.total), 100)
self.assertEqual(self.evaluate(m.count), 1)
# check update_state() and result() + state accumulation + tensor input
update_op = m.update_state([
tf.convert_to_tensor(1),
tf.convert_to_tensor(5)
])
self.evaluate(update_op)
self.assertAlmostEqual(self.evaluate(m.result()), 106 / 3, 2)
self.assertEqual(self.evaluate(m.total), 106) # 100 + 1 + 5
self.assertEqual(self.evaluate(m.count), 3)
# check reset_state()
m.reset_state()
self.assertEqual(self.evaluate(m.total), 0)
self.assertEqual(self.evaluate(m.count), 0)
# Check save and restore config
m2 = metrics.Mean.from_config(m.get_config())
self.assertEqual(m2.name, 'my_mean')
self.assertTrue(m2.stateful)
self.assertEqual(m2.dtype, tf.float32)
self.assertEqual(len(m2.variables), 2)
@testing_utils.run_v2_only
def test_function_wrapped_reset_state(self):
m = metrics.Mean(name='my_mean')
# check reset_state in function.
@tf.function
def reset_in_fn():
m.reset_state()
return m.update_state(100)
for _ in range(5):
self.evaluate(reset_in_fn())
self.assertEqual(self.evaluate(m.count), 1)
@keras_parameterized.run_all_keras_modes
def test_mean_with_sample_weight(self):
m = metrics.Mean(dtype=tf.float64)
self.assertEqual(m.dtype, tf.float64)
self.evaluate(tf.compat.v1.variables_initializer(m.variables))
# check scalar weight
result_t = m(100, sample_weight=0.5)
self.assertEqual(self.evaluate(result_t), 50 / 0.5)
self.assertEqual(self.evaluate(m.total), 50)
self.assertEqual(self.evaluate(m.count), 0.5)
# check weights not scalar and weights rank matches values rank
result_t = m([1, 5], sample_weight=[1, 0.2])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 52 / 1.7, 2)
self.assertAlmostEqual(self.evaluate(m.total), 52, 2) # 50 + 1 + 5 * 0.2
self.assertAlmostEqual(self.evaluate(m.count), 1.7, 2) # 0.5 + 1.2
# check weights broadcast
result_t = m([1, 2], sample_weight=0.5)
self.assertAlmostEqual(self.evaluate(result_t), 53.5 / 2.7, 2)
self.assertAlmostEqual(self.evaluate(m.total), 53.5, 2) # 52 + 0.5 + 1
self.assertAlmostEqual(self.evaluate(m.count), 2.7, 2) # 1.7 + 0.5 + 0.5
# check weights squeeze
result_t = m([1, 5], sample_weight=[[1], [0.2]])
self.assertAlmostEqual(self.evaluate(result_t), 55.5 / 3.9, 2)
self.assertAlmostEqual(self.evaluate(m.total), 55.5, 2) # 53.5 + 1 + 1
self.assertAlmostEqual(self.evaluate(m.count), 3.9, 2) # 2.7 + 1.2
# check weights expand
result_t = m([[1], [5]], sample_weight=[1, 0.2])
self.assertAlmostEqual(self.evaluate(result_t), 57.5 / 5.1, 2)
self.assertAlmostEqual(self.evaluate(m.total), 57.5, 2) # 55.5 + 1 + 1
self.assertAlmostEqual(self.evaluate(m.count), 5.1, 2) # 3.9 + 1.2
# check values reduced to the dimensions of weight
result_t = m([[[1., 2.], [3., 2.], [0.5, 4.]]], sample_weight=[0.5])
result = np.round(self.evaluate(result_t), decimals=2) # 58.5 / 5.6
self.assertEqual(result, 10.45)
self.assertEqual(np.round(self.evaluate(m.total), decimals=2), 58.54)
self.assertEqual(np.round(self.evaluate(m.count), decimals=2), 5.6)
@keras_parameterized.run_all_keras_modes
def test_mean_graph_with_placeholder(self):
with tf.compat.v1.get_default_graph().as_default(), self.cached_session() as sess:
m = metrics.Mean()
v = tf.compat.v1.placeholder(tf.float32)
w = tf.compat.v1.placeholder(tf.float32)
self.evaluate(tf.compat.v1.variables_initializer(m.variables))
# check __call__()
result_t = m(v, sample_weight=w)
result = sess.run(result_t, feed_dict=({v: 100, w: 0.5}))
self.assertEqual(self.evaluate(m.total), 50)
self.assertEqual(self.evaluate(m.count), 0.5)
self.assertEqual(result, 50 / 0.5)
# check update_state() and result()
result = sess.run(result_t, feed_dict=({v: [1, 5], w: [1, 0.2]}))
self.assertAlmostEqual(self.evaluate(m.total), 52, 2) # 50 + 1 + 5 * 0.2
self.assertAlmostEqual(self.evaluate(m.count), 1.7, 2) # 0.5 + 1.2
self.assertAlmostEqual(result, 52 / 1.7, 2)
@keras_parameterized.run_all_keras_modes
def test_save_restore(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, 'ckpt')
m = metrics.Mean()
checkpoint = tf.train.Checkpoint(mean=m)
self.evaluate(tf.compat.v1.variables_initializer(m.variables))
# update state
self.evaluate(m(100.))
self.evaluate(m(200.))
# save checkpoint and then add an update
save_path = checkpoint.save(checkpoint_prefix)
self.evaluate(m(1000.))
# restore to the same checkpoint mean object
checkpoint.restore(save_path).assert_consumed().run_restore_ops()
self.evaluate(m(300.))
self.assertEqual(200., self.evaluate(m.result()))
# restore to a different checkpoint mean object
restore_mean = metrics.Mean()
restore_checkpoint = tf.train.Checkpoint(mean=restore_mean)
status = restore_checkpoint.restore(save_path)
restore_update = restore_mean(300.)
status.assert_consumed().run_restore_ops()
self.evaluate(restore_update)
self.assertEqual(200., self.evaluate(restore_mean.result()))
self.assertEqual(3, self.evaluate(restore_mean.count))
@keras_parameterized.run_all_keras_modes
def test_multiple_instances(self):
m = metrics.Mean()
m2 = metrics.Mean()
self.assertEqual(m.name, 'mean')
self.assertEqual(m2.name, 'mean')
self.assertEqual([v.name for v in m.variables],
testing_utils.get_expected_metric_variable_names(
['total', 'count']))
self.assertEqual([v.name for v in m2.variables],
testing_utils.get_expected_metric_variable_names(
['total', 'count'], name_suffix='_1'))
self.evaluate(tf.compat.v1.variables_initializer(m.variables))
self.evaluate(tf.compat.v1.variables_initializer(m2.variables))
# check initial state
self.assertEqual(self.evaluate(m.total), 0)
self.assertEqual(self.evaluate(m.count), 0)
self.assertEqual(self.evaluate(m2.total), 0)
self.assertEqual(self.evaluate(m2.count), 0)
# check __call__()
self.assertEqual(self.evaluate(m(100)), 100)
self.assertEqual(self.evaluate(m.total), 100)
self.assertEqual(self.evaluate(m.count), 1)
self.assertEqual(self.evaluate(m2.total), 0)
self.assertEqual(self.evaluate(m2.count), 0)
self.assertEqual(self.evaluate(m2([63, 10])), 36.5)
self.assertEqual(self.evaluate(m2.total), 73)
self.assertEqual(self.evaluate(m2.count), 2)
self.assertEqual(self.evaluate(m.result()), 100)
self.assertEqual(self.evaluate(m.total), 100)
self.assertEqual(self.evaluate(m.count), 1)
@testing_utils.run_v2_only
def test_deepcopy_of_metrics(self):
m = metrics.Mean(name='my_mean')
m.reset_state()
m.update_state(100)
m_copied = copy.deepcopy(m)
m_copied.update_state(200)
self.assertEqual(self.evaluate(m.result()), 100)
self.assertEqual(self.evaluate(m_copied.result()), 150)
m.reset_state()
self.assertEqual(self.evaluate(m.result()), 0)
self.assertEqual(self.evaluate(m_copied.result()), 150)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class KerasAccuracyTest(tf.test.TestCase):
def test_accuracy(self):
acc_obj = metrics.Accuracy(name='my_acc')
# check config
self.assertEqual(acc_obj.name, 'my_acc')
self.assertTrue(acc_obj.stateful)
self.assertEqual(len(acc_obj.variables), 2)
self.assertEqual(acc_obj.dtype, tf.float32)
self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))
# verify that correct value is returned
update_op = acc_obj.update_state([[1], [2], [3], [4]], [[1], [2], [3], [4]])
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
    self.assertEqual(result, 1) # 4/4
# Check save and restore config
a2 = metrics.Accuracy.from_config(acc_obj.get_config())
self.assertEqual(a2.name, 'my_acc')
self.assertTrue(a2.stateful)
self.assertEqual(len(a2.variables), 2)
self.assertEqual(a2.dtype, tf.float32)
# check with sample_weight
result_t = acc_obj([[2], [1]], [[2], [0]], sample_weight=[[0.5], [0.2]])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.96, 2) # 4.5/4.7
def test_accuracy_ragged(self):
acc_obj = metrics.Accuracy(name='my_acc')
self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))
# verify that correct value is returned
rt1 = tf.ragged.constant([[1], [2], [3], [4]])
rt2 = tf.ragged.constant([[1], [2], [3], [4]])
update_op = acc_obj.update_state(rt1, rt2)
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
    self.assertEqual(result, 1) # 4/4
# check with sample_weight
rt1 = tf.ragged.constant([[2], [1]])
rt2 = tf.ragged.constant([[2], [0]])
sw_ragged = tf.ragged.constant([[0.5], [0.2]])
result_t = acc_obj(rt1, rt2, sample_weight=sw_ragged)
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.96, 2) # 4.5/4.7
def test_binary_accuracy(self):
acc_obj = metrics.BinaryAccuracy(name='my_acc')
# check config
self.assertEqual(acc_obj.name, 'my_acc')
self.assertTrue(acc_obj.stateful)
self.assertEqual(len(acc_obj.variables), 2)
self.assertEqual(acc_obj.dtype, tf.float32)
self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))
# verify that correct value is returned
update_op = acc_obj.update_state([[1], [0]], [[1], [0]])
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertEqual(result, 1) # 2/2
# check y_pred squeeze
update_op = acc_obj.update_state([[1], [1]], [[[1]], [[0]]])
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertAlmostEqual(result, 0.75, 2) # 3/4
# check y_true squeeze
result_t = acc_obj([[[1]], [[1]]], [[1], [0]])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.67, 2) # 4/6
# check with sample_weight
result_t = acc_obj([[1], [1]], [[1], [0]], [[0.5], [0.2]])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.67, 2) # 4.5/6.7
def test_binary_accuracy_ragged(self):
acc_obj = metrics.BinaryAccuracy(name='my_acc')
self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))
# verify that correct value is returned
rt1 = tf.ragged.constant([[1], [0]])
rt2 = tf.ragged.constant([[1], [0]])
update_op = acc_obj.update_state(rt1, rt2)
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertEqual(result, 1) # 2/2
# check y_true squeeze only supported for dense tensors and is
# not supported by ragged tensor (different ranks). --> error
rt1 = tf.ragged.constant([[[1], [1]]])
rt2 = tf.ragged.constant([[1], [0]])
with self.assertRaises(ValueError):
result_t = acc_obj(rt1, rt2)
result = self.evaluate(result_t)
def test_binary_accuracy_threshold(self):
acc_obj = metrics.BinaryAccuracy(threshold=0.7)
self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))
result_t = acc_obj([[1], [1], [0], [0]], [[0.9], [0.6], [0.4], [0.8]])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.5, 2)
def test_binary_accuracy_threshold_ragged(self):
acc_obj = metrics.BinaryAccuracy(threshold=0.7)
self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))
rt1 = tf.ragged.constant([[1], [1], [0], [0]])
rt2 = tf.ragged.constant([[0.9], [0.6], [0.4], [0.8]])
result_t = acc_obj(rt1, rt2)
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.5, 2)
def test_categorical_accuracy(self):
acc_obj = metrics.CategoricalAccuracy(name='my_acc')
# check config
self.assertEqual(acc_obj.name, 'my_acc')
self.assertTrue(acc_obj.stateful)
self.assertEqual(len(acc_obj.variables), 2)
self.assertEqual(acc_obj.dtype, tf.float32)
self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))
# verify that correct value is returned
update_op = acc_obj.update_state([[0, 0, 1], [0, 1, 0]],
[[0.1, 0.1, 0.8], [0.05, 0.95, 0]])
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertEqual(result, 1) # 2/2
# check with sample_weight
result_t = acc_obj([[0, 0, 1], [0, 1, 0]],
[[0.1, 0.1, 0.8], [0.05, 0, 0.95]], [[0.5], [0.2]])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.93, 2) # 2.5/2.7
def test_categorical_accuracy_ragged(self):
acc_obj = metrics.CategoricalAccuracy(name='my_acc')
self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))
# verify that correct value is returned
rt1 = tf.ragged.constant([[0, 0, 1], [0, 1, 0]])
rt2 = tf.ragged.constant([[0.1, 0.1, 0.8], [0.05, 0.95, 0]])
update_op = acc_obj.update_state(rt1, rt2)
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertEqual(result, 1) # 2/2
# check with sample_weight
rt1 = tf.ragged.constant([[0, 0, 1], [0, 1, 0]])
rt2 = tf.ragged.constant([[0.1, 0.1, 0.8], [0.05, 0, 0.95]])
sample_weight = tf.ragged.constant([[0.5], [0.2]])
with self.assertRaises(tf.errors.InvalidArgumentError):
result_t = acc_obj(rt1, rt2, sample_weight)
result = self.evaluate(result_t)
def test_sparse_categorical_accuracy(self):
acc_obj = metrics.SparseCategoricalAccuracy(name='my_acc')
# check config
self.assertEqual(acc_obj.name, 'my_acc')
self.assertTrue(acc_obj.stateful)
self.assertEqual(len(acc_obj.variables), 2)
self.assertEqual(acc_obj.dtype, tf.float32)
self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))
# verify that correct value is returned
update_op = acc_obj.update_state([[2], [1]],
[[0.1, 0.1, 0.8], [0.05, 0.95, 0]])
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertEqual(result, 1) # 2/2
# check with sample_weight
result_t = acc_obj([[2], [1]], [[0.1, 0.1, 0.8], [0.05, 0, 0.95]],
[[0.5], [0.2]])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.93, 2) # 2.5/2.7
def test_sparse_categorical_accuracy_ragged(self):
acc_obj = metrics.SparseCategoricalAccuracy(name='my_acc')
# verify that correct value is returned
rt1 = tf.ragged.constant([[2], [1]])
rt2 = tf.ragged.constant([[0.1, 0.1, 0.8], [0.05, 0.95, 0]])
with self.assertRaises(tf.errors.InvalidArgumentError):
# sparse_categorical_accuracy is not supported for composite/ragged
# tensors.
update_op = acc_obj.update_state(rt1, rt2)
self.evaluate(update_op)
def test_sparse_categorical_accuracy_mismatched_dims(self):
acc_obj = metrics.SparseCategoricalAccuracy(name='my_acc')
# check config
self.assertEqual(acc_obj.name, 'my_acc')
self.assertTrue(acc_obj.stateful)
self.assertEqual(len(acc_obj.variables), 2)
self.assertEqual(acc_obj.dtype, tf.float32)
self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))
# verify that correct value is returned
update_op = acc_obj.update_state([2, 1], [[0.1, 0.1, 0.8], [0.05, 0.95, 0]])
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertEqual(result, 1) # 2/2
# check with sample_weight
result_t = acc_obj([2, 1], [[0.1, 0.1, 0.8], [0.05, 0, 0.95]],
[[0.5], [0.2]])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.93, 2) # 2.5/2.7
def test_sparse_categorical_accuracy_mismatched_dims_dynamic(self):
with tf.compat.v1.get_default_graph().as_default(), self.cached_session() as sess:
acc_obj = metrics.SparseCategoricalAccuracy(name='my_acc')
self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))
t = tf.compat.v1.placeholder(tf.float32)
p = tf.compat.v1.placeholder(tf.float32)
w = tf.compat.v1.placeholder(tf.float32)
result_t = acc_obj(t, p, w)
result = sess.run(
result_t,
feed_dict=({
t: [2, 1],
p: [[0.1, 0.1, 0.8], [0.05, 0, 0.95]],
w: [[0.5], [0.2]]
}))
      self.assertAlmostEqual(result, 0.71, 2) # 0.5/0.7
def test_get_acc(self):
acc_fn = metrics.get('acc')
self.assertEqual(acc_fn, metrics.accuracy)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class CosineSimilarityTest(tf.test.TestCase):
def l2_norm(self, x, axis):
epsilon = 1e-12
square_sum = np.sum(np.square(x), axis=axis, keepdims=True)
x_inv_norm = 1 / np.sqrt(np.maximum(square_sum, epsilon))
return np.multiply(x, x_inv_norm)
def setup(self, axis=1):
self.np_y_true = np.asarray([[1, 9, 2], [-5, -2, 6]], dtype=np.float32)
self.np_y_pred = np.asarray([[4, 8, 12], [8, 1, 3]], dtype=np.float32)
y_true = self.l2_norm(self.np_y_true, axis)
y_pred = self.l2_norm(self.np_y_pred, axis)
self.expected_loss = np.sum(np.multiply(y_true, y_pred), axis=(axis,))
self.y_true = tf.constant(self.np_y_true)
self.y_pred = tf.constant(self.np_y_pred)
def test_config(self):
cosine_obj = metrics.CosineSimilarity(
axis=2, name='my_cos', dtype=tf.int32)
self.assertEqual(cosine_obj.name, 'my_cos')
self.assertEqual(cosine_obj._dtype, tf.int32)
# Check save and restore config
cosine_obj2 = metrics.CosineSimilarity.from_config(cosine_obj.get_config())
self.assertEqual(cosine_obj2.name, 'my_cos')
self.assertEqual(cosine_obj2._dtype, tf.int32)
def test_unweighted(self):
self.setup()
cosine_obj = metrics.CosineSimilarity()
self.evaluate(tf.compat.v1.variables_initializer(cosine_obj.variables))
loss = cosine_obj(self.y_true, self.y_pred)
expected_loss = np.mean(self.expected_loss)
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_weighted(self):
self.setup()
cosine_obj = metrics.CosineSimilarity()
self.evaluate(tf.compat.v1.variables_initializer(cosine_obj.variables))
sample_weight = np.asarray([1.2, 3.4])
loss = cosine_obj(
self.y_true,
self.y_pred,
sample_weight=tf.constant(sample_weight))
expected_loss = np.sum(
self.expected_loss * sample_weight) / np.sum(sample_weight)
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_axis(self):
self.setup(axis=1)
cosine_obj = metrics.CosineSimilarity(axis=1)
self.evaluate(tf.compat.v1.variables_initializer(cosine_obj.variables))
loss = cosine_obj(self.y_true, self.y_pred)
expected_loss = np.mean(self.expected_loss)
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class MeanAbsoluteErrorTest(tf.test.TestCase):
def test_config(self):
mae_obj = metrics.MeanAbsoluteError(name='my_mae', dtype=tf.int32)
self.assertEqual(mae_obj.name, 'my_mae')
self.assertEqual(mae_obj._dtype, tf.int32)
# Check save and restore config
mae_obj2 = metrics.MeanAbsoluteError.from_config(mae_obj.get_config())
self.assertEqual(mae_obj2.name, 'my_mae')
self.assertEqual(mae_obj2._dtype, tf.int32)
def test_unweighted(self):
mae_obj = metrics.MeanAbsoluteError()
self.evaluate(tf.compat.v1.variables_initializer(mae_obj.variables))
y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = mae_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = mae_obj.result()
self.assertAllClose(0.5, result, atol=1e-5)
def test_weighted(self):
mae_obj = metrics.MeanAbsoluteError()
self.evaluate(tf.compat.v1.variables_initializer(mae_obj.variables))
y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = tf.constant((1., 1.5, 2., 2.5))
result = mae_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.54285, self.evaluate(result), atol=1e-5)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class MeanAbsolutePercentageErrorTest(tf.test.TestCase):
def test_config(self):
mape_obj = metrics.MeanAbsolutePercentageError(
name='my_mape', dtype=tf.int32)
self.assertEqual(mape_obj.name, 'my_mape')
self.assertEqual(mape_obj._dtype, tf.int32)
# Check save and restore config
mape_obj2 = metrics.MeanAbsolutePercentageError.from_config(
mape_obj.get_config())
self.assertEqual(mape_obj2.name, 'my_mape')
self.assertEqual(mape_obj2._dtype, tf.int32)
def test_unweighted(self):
mape_obj = metrics.MeanAbsolutePercentageError()
self.evaluate(tf.compat.v1.variables_initializer(mape_obj.variables))
y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = mape_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = mape_obj.result()
self.assertAllClose(35e7, result, atol=1e-5)
def test_weighted(self):
mape_obj = metrics.MeanAbsolutePercentageError()
self.evaluate(tf.compat.v1.variables_initializer(mape_obj.variables))
y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = tf.constant((1., 1.5, 2., 2.5))
result = mape_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(40e7, self.evaluate(result), atol=1e-5)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class MeanSquaredErrorTest(tf.test.TestCase):
def test_config(self):
mse_obj = metrics.MeanSquaredError(name='my_mse', dtype=tf.int32)
self.assertEqual(mse_obj.name, 'my_mse')
self.assertEqual(mse_obj._dtype, tf.int32)
# Check save and restore config
mse_obj2 = metrics.MeanSquaredError.from_config(mse_obj.get_config())
self.assertEqual(mse_obj2.name, 'my_mse')
self.assertEqual(mse_obj2._dtype, tf.int32)
def test_unweighted(self):
mse_obj = metrics.MeanSquaredError()
self.evaluate(tf.compat.v1.variables_initializer(mse_obj.variables))
y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = mse_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = mse_obj.result()
self.assertAllClose(0.5, result, atol=1e-5)
def test_weighted(self):
mse_obj = metrics.MeanSquaredError()
self.evaluate(tf.compat.v1.variables_initializer(mse_obj.variables))
y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = tf.constant((1., 1.5, 2., 2.5))
result = mse_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.54285, self.evaluate(result), atol=1e-5)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class MeanSquaredLogarithmicErrorTest(tf.test.TestCase):
def test_config(self):
msle_obj = metrics.MeanSquaredLogarithmicError(
name='my_msle', dtype=tf.int32)
self.assertEqual(msle_obj.name, 'my_msle')
self.assertEqual(msle_obj._dtype, tf.int32)
# Check save and restore config
msle_obj2 = metrics.MeanSquaredLogarithmicError.from_config(
msle_obj.get_config())
self.assertEqual(msle_obj2.name, 'my_msle')
self.assertEqual(msle_obj2._dtype, tf.int32)
def test_unweighted(self):
msle_obj = metrics.MeanSquaredLogarithmicError()
self.evaluate(tf.compat.v1.variables_initializer(msle_obj.variables))
y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = msle_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = msle_obj.result()
self.assertAllClose(0.24022, result, atol=1e-5)
def test_weighted(self):
msle_obj = metrics.MeanSquaredLogarithmicError()
self.evaluate(tf.compat.v1.variables_initializer(msle_obj.variables))
y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = tf.constant((1., 1.5, 2., 2.5))
result = msle_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.26082, self.evaluate(result), atol=1e-5)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class HingeTest(tf.test.TestCase):
def test_config(self):
hinge_obj = metrics.Hinge(name='hinge', dtype=tf.int32)
self.assertEqual(hinge_obj.name, 'hinge')
self.assertEqual(hinge_obj._dtype, tf.int32)
# Check save and restore config
hinge_obj2 = metrics.Hinge.from_config(hinge_obj.get_config())
self.assertEqual(hinge_obj2.name, 'hinge')
self.assertEqual(hinge_obj2._dtype, tf.int32)
def test_unweighted(self):
hinge_obj = metrics.Hinge()
self.evaluate(tf.compat.v1.variables_initializer(hinge_obj.variables))
y_true = tf.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
y_pred = tf.constant([[-0.3, 0.2, -0.1, 1.6],
[-0.25, -1., 0.5, 0.6]])
# metric = max(0, 1-y_true * y_pred), where y_true is -1/1
# y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
# y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
# 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
# metric = [(0.7 + 0.8 + 0.9 + 0) / 4, (0.75 + 0 + 0.5 + 0.4) / 4]
# = [0.6, 0.4125]
# reduced metric = (0.6 + 0.4125) / 2
update_op = hinge_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = hinge_obj.result()
self.assertAllClose(0.506, result, atol=1e-3)
def test_weighted(self):
hinge_obj = metrics.Hinge()
self.evaluate(tf.compat.v1.variables_initializer(hinge_obj.variables))
y_true = tf.constant([[-1, 1, -1, 1], [-1, -1, 1, 1]])
y_pred = tf.constant([[-0.3, 0.2, -0.1, 1.6],
[-0.25, -1., 0.5, 0.6]])
sample_weight = tf.constant([1.5, 2.])
# metric = max(0, 1-y_true * y_pred), where y_true is -1/1
# y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
# 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
# metric = [(0.7 + 0.8 + 0.9 + 0) / 4, (0.75 + 0 + 0.5 + 0.4) / 4]
# = [0.6, 0.4125]
# weighted metric = [0.6 * 1.5, 0.4125 * 2]
# reduced metric = (0.6 * 1.5 + 0.4125 * 2) / (1.5 + 2)
result = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.493, self.evaluate(result), atol=1e-3)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class SquaredHingeTest(tf.test.TestCase):
def test_config(self):
sq_hinge_obj = metrics.SquaredHinge(name='sq_hinge', dtype=tf.int32)
self.assertEqual(sq_hinge_obj.name, 'sq_hinge')
self.assertEqual(sq_hinge_obj._dtype, tf.int32)
# Check save and restore config
sq_hinge_obj2 = metrics.SquaredHinge.from_config(sq_hinge_obj.get_config())
self.assertEqual(sq_hinge_obj2.name, 'sq_hinge')
self.assertEqual(sq_hinge_obj2._dtype, tf.int32)
def test_unweighted(self):
sq_hinge_obj = metrics.SquaredHinge()
self.evaluate(tf.compat.v1.variables_initializer(sq_hinge_obj.variables))
y_true = tf.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
y_pred = tf.constant([[-0.3, 0.2, -0.1, 1.6],
[-0.25, -1., 0.5, 0.6]])
# metric = max(0, 1-y_true * y_pred), where y_true is -1/1
# y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
# y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
# 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
# max(0, 1 - y_true * y_pred) = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5, 0.4]]
# squared(max(0, 1 - y_true * y_pred)) = [[0.49, 0.64, 0.81, 0],
# [0.5625, 0, 0.25, 0.16]]
# metric = [(0.49 + 0.64 + 0.81 + 0) / 4, (0.5625 + 0 + 0.25 + 0.16) / 4]
# = [0.485, 0.2431]
# reduced metric = (0.485 + 0.2431) / 2
update_op = sq_hinge_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = sq_hinge_obj.result()
self.assertAllClose(0.364, result, atol=1e-3)
def test_weighted(self):
sq_hinge_obj = metrics.SquaredHinge()
self.evaluate(tf.compat.v1.variables_initializer(sq_hinge_obj.variables))
y_true = tf.constant([[-1, 1, -1, 1], [-1, -1, 1, 1]])
y_pred = tf.constant([[-0.3, 0.2, -0.1, 1.6],
[-0.25, -1., 0.5, 0.6]])
sample_weight = tf.constant([1.5, 2.])
# metric = max(0, 1-y_true * y_pred), where y_true is -1/1
# y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
# 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
# max(0, 1 - y_true * y_pred) = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5, 0.4]]
# squared(max(0, 1 - y_true * y_pred)) = [[0.49, 0.64, 0.81, 0],
# [0.5625, 0, 0.25, 0.16]]
# metric = [(0.49 + 0.64 + 0.81 + 0) / 4, (0.5625 + 0 + 0.25 + 0.16) / 4]
# = [0.485, 0.2431]
# weighted metric = [0.485 * 1.5, 0.2431 * 2]
# reduced metric = (0.485 * 1.5 + 0.2431 * 2) / (1.5 + 2)
result = sq_hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.347, self.evaluate(result), atol=1e-3)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class CategoricalHingeTest(tf.test.TestCase):
def test_config(self):
cat_hinge_obj = metrics.CategoricalHinge(
name='cat_hinge', dtype=tf.int32)
self.assertEqual(cat_hinge_obj.name, 'cat_hinge')
self.assertEqual(cat_hinge_obj._dtype, tf.int32)
# Check save and restore config
cat_hinge_obj2 = metrics.CategoricalHinge.from_config(
cat_hinge_obj.get_config())
self.assertEqual(cat_hinge_obj2.name, 'cat_hinge')
self.assertEqual(cat_hinge_obj2._dtype, tf.int32)
def test_unweighted(self):
cat_hinge_obj = metrics.CategoricalHinge()
self.evaluate(tf.compat.v1.variables_initializer(cat_hinge_obj.variables))
y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = cat_hinge_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = cat_hinge_obj.result()
self.assertAllClose(0.5, result, atol=1e-5)
def test_weighted(self):
cat_hinge_obj = metrics.CategoricalHinge()
self.evaluate(tf.compat.v1.variables_initializer(cat_hinge_obj.variables))
y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = tf.constant((1., 1.5, 2., 2.5))
result = cat_hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.5, self.evaluate(result), atol=1e-5)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class RootMeanSquaredErrorTest(tf.test.TestCase):
def test_config(self):
rmse_obj = metrics.RootMeanSquaredError(name='rmse', dtype=tf.int32)
self.assertEqual(rmse_obj.name, 'rmse')
self.assertEqual(rmse_obj._dtype, tf.int32)
rmse_obj2 = metrics.RootMeanSquaredError.from_config(rmse_obj.get_config())
self.assertEqual(rmse_obj2.name, 'rmse')
self.assertEqual(rmse_obj2._dtype, tf.int32)
def test_unweighted(self):
rmse_obj = metrics.RootMeanSquaredError()
self.evaluate(tf.compat.v1.variables_initializer(rmse_obj.variables))
y_true = tf.constant((2, 4, 6))
y_pred = tf.constant((1, 3, 2))
update_op = rmse_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = rmse_obj.result()
# error = [-1, -1, -4], square(error) = [1, 1, 16], mean = 18/3 = 6
self.assertAllClose(math.sqrt(6), result, atol=1e-3)
def test_weighted(self):
rmse_obj = metrics.RootMeanSquaredError()
self.evaluate(tf.compat.v1.variables_initializer(rmse_obj.variables))
y_true = tf.constant((2, 4, 6, 8))
y_pred = tf.constant((1, 3, 2, 3))
sample_weight = tf.constant((0, 1, 0, 1))
result = rmse_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(math.sqrt(13), self.evaluate(result), atol=1e-3)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class TopKCategoricalAccuracyTest(tf.test.TestCase):
def test_config(self):
a_obj = metrics.TopKCategoricalAccuracy(name='topkca', dtype=tf.int32)
self.assertEqual(a_obj.name, 'topkca')
self.assertEqual(a_obj._dtype, tf.int32)
a_obj2 = metrics.TopKCategoricalAccuracy.from_config(a_obj.get_config())
self.assertEqual(a_obj2.name, 'topkca')
self.assertEqual(a_obj2._dtype, tf.int32)
def test_correctness(self):
a_obj = metrics.TopKCategoricalAccuracy()
self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables))
y_true = tf.constant([[0, 0, 1], [0, 1, 0]])
y_pred = tf.constant([[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
result = a_obj(y_true, y_pred)
self.assertEqual(1, self.evaluate(result)) # both the samples match
# With `k` < 5.
a_obj = metrics.TopKCategoricalAccuracy(k=1)
self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables))
result = a_obj(y_true, y_pred)
self.assertEqual(0.5, self.evaluate(result)) # only sample #2 matches
# With `k` > 5.
y_true = tf.constant([[0, 0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0]])
y_pred = tf.constant([[0.5, 0.9, 0.1, 0.7, 0.6, 0.5, 0.4],
[0.05, 0.95, 0, 0, 0, 0, 0]])
a_obj = metrics.TopKCategoricalAccuracy(k=6)
self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables))
result = a_obj(y_true, y_pred)
self.assertEqual(0.5, self.evaluate(result)) # only 1 sample matches.
def test_weighted(self):
a_obj = metrics.TopKCategoricalAccuracy(k=2)
self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables))
y_true = tf.constant([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
y_pred = tf.constant([[0, 0.9, 0.1], [0, 0.9, 0.1], [0, 0.9, 0.1]])
sample_weight = tf.constant((1.0, 0.0, 1.0))
result = a_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(1.0, self.evaluate(result), atol=1e-5)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class SparseTopKCategoricalAccuracyTest(tf.test.TestCase):
def test_config(self):
a_obj = metrics.SparseTopKCategoricalAccuracy(
name='stopkca', dtype=tf.int32)
self.assertEqual(a_obj.name, 'stopkca')
self.assertEqual(a_obj._dtype, tf.int32)
a_obj2 = metrics.SparseTopKCategoricalAccuracy.from_config(
a_obj.get_config())
self.assertEqual(a_obj2.name, 'stopkca')
self.assertEqual(a_obj2._dtype, tf.int32)
def test_correctness(self):
a_obj = metrics.SparseTopKCategoricalAccuracy()
self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables))
y_true = tf.constant([2, 1])
y_pred = tf.constant([[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
result = a_obj(y_true, y_pred)
self.assertEqual(1, self.evaluate(result)) # both the samples match
# With `k` < 5.
a_obj = metrics.SparseTopKCategoricalAccuracy(k=1)
self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables))
result = a_obj(y_true, y_pred)
self.assertEqual(0.5, self.evaluate(result)) # only sample #2 matches
# With `k` > 5.
y_pred = tf.constant([[0.5, 0.9, 0.1, 0.7, 0.6, 0.5, 0.4],
[0.05, 0.95, 0, 0, 0, 0, 0]])
a_obj = metrics.SparseTopKCategoricalAccuracy(k=6)
self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables))
result = a_obj(y_true, y_pred)
self.assertEqual(0.5, self.evaluate(result)) # only 1 sample matches.
def test_weighted(self):
a_obj = metrics.SparseTopKCategoricalAccuracy(k=2)
self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables))
y_true = tf.constant([1, 0, 2])
y_pred = tf.constant([[0, 0.9, 0.1], [0, 0.9, 0.1], [0, 0.9, 0.1]])
sample_weight = tf.constant((1.0, 0.0, 1.0))
result = a_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(1.0, self.evaluate(result), atol=1e-5)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class LogCoshErrorTest(tf.test.TestCase):
def setup(self):
y_pred = np.asarray([1, 9, 2, -5, -2, 6]).reshape((2, 3))
y_true = np.asarray([4, 8, 12, 8, 1, 3]).reshape((2, 3))
self.batch_size = 6
error = y_pred - y_true
self.expected_results = np.log((np.exp(error) + np.exp(-error)) / 2)
self.y_pred = tf.constant(y_pred, dtype=tf.float32)
self.y_true = tf.constant(y_true)
def test_config(self):
logcosh_obj = metrics.LogCoshError(name='logcosh', dtype=tf.int32)
self.assertEqual(logcosh_obj.name, 'logcosh')
self.assertEqual(logcosh_obj._dtype, tf.int32)
def test_unweighted(self):
self.setup()
logcosh_obj = metrics.LogCoshError()
self.evaluate(tf.compat.v1.variables_initializer(logcosh_obj.variables))
update_op = logcosh_obj.update_state(self.y_true, self.y_pred)
self.evaluate(update_op)
result = logcosh_obj.result()
expected_result = np.sum(self.expected_results) / self.batch_size
self.assertAllClose(result, expected_result, atol=1e-3)
def test_weighted(self):
self.setup()
logcosh_obj = metrics.LogCoshError()
self.evaluate(tf.compat.v1.variables_initializer(logcosh_obj.variables))
sample_weight = tf.constant([1.2, 3.4], shape=(2, 1))
result = logcosh_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
sample_weight = np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3))
expected_result = np.multiply(self.expected_results, sample_weight)
expected_result = np.sum(expected_result) / np.sum(sample_weight)
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class PoissonTest(tf.test.TestCase):
def setup(self):
y_pred = np.asarray([1, 9, 2, 5, 2, 6]).reshape((2, 3))
y_true = np.asarray([4, 8, 12, 8, 1, 3]).reshape((2, 3))
self.batch_size = 6
self.expected_results = y_pred - np.multiply(y_true, np.log(y_pred))
self.y_pred = tf.constant(y_pred, dtype=tf.float32)
self.y_true = tf.constant(y_true)
def test_config(self):
poisson_obj = metrics.Poisson(name='poisson', dtype=tf.int32)
self.assertEqual(poisson_obj.name, 'poisson')
self.assertEqual(poisson_obj._dtype, tf.int32)
poisson_obj2 = metrics.Poisson.from_config(poisson_obj.get_config())
self.assertEqual(poisson_obj2.name, 'poisson')
self.assertEqual(poisson_obj2._dtype, tf.int32)
def test_unweighted(self):
self.setup()
poisson_obj = metrics.Poisson()
self.evaluate(tf.compat.v1.variables_initializer(poisson_obj.variables))
update_op = poisson_obj.update_state(self.y_true, self.y_pred)
self.evaluate(update_op)
result = poisson_obj.result()
expected_result = np.sum(self.expected_results) / self.batch_size
self.assertAllClose(result, expected_result, atol=1e-3)
def test_weighted(self):
self.setup()
poisson_obj = metrics.Poisson()
self.evaluate(tf.compat.v1.variables_initializer(poisson_obj.variables))
sample_weight = tf.constant([1.2, 3.4], shape=(2, 1))
result = poisson_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
sample_weight = np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3))
expected_result = np.multiply(self.expected_results, sample_weight)
expected_result = np.sum(expected_result) / np.sum(sample_weight)
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class KLDivergenceTest(tf.test.TestCase):
def setup(self):
y_pred = np.asarray([.4, .9, .12, .36, .3, .4]).reshape((2, 3))
y_true = np.asarray([.5, .8, .12, .7, .43, .8]).reshape((2, 3))
self.batch_size = 2
self.expected_results = np.multiply(y_true, np.log(y_true / y_pred))
self.y_pred = tf.constant(y_pred, dtype=tf.float32)
self.y_true = tf.constant(y_true)
def test_config(self):
k_obj = metrics.KLDivergence(name='kld', dtype=tf.int32)
self.assertEqual(k_obj.name, 'kld')
self.assertEqual(k_obj._dtype, tf.int32)
k_obj2 = metrics.KLDivergence.from_config(k_obj.get_config())
self.assertEqual(k_obj2.name, 'kld')
self.assertEqual(k_obj2._dtype, tf.int32)
def test_unweighted(self):
self.setup()
k_obj = metrics.KLDivergence()
self.evaluate(tf.compat.v1.variables_initializer(k_obj.variables))
update_op = k_obj.update_state(self.y_true, self.y_pred)
self.evaluate(update_op)
result = k_obj.result()
expected_result = np.sum(self.expected_results) / self.batch_size
self.assertAllClose(result, expected_result, atol=1e-3)
def test_weighted(self):
self.setup()
k_obj = metrics.KLDivergence()
self.evaluate(tf.compat.v1.variables_initializer(k_obj.variables))
sample_weight = tf.constant([1.2, 3.4], shape=(2, 1))
result = k_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
sample_weight = np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3))
expected_result = np.multiply(self.expected_results, sample_weight)
expected_result = np.sum(expected_result) / (1.2 + 3.4)
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class MeanRelativeErrorTest(tf.test.TestCase):
def test_config(self):
normalizer = tf.constant([1, 3], dtype=tf.float32)
mre_obj = metrics.MeanRelativeError(normalizer=normalizer, name='mre')
self.assertEqual(mre_obj.name, 'mre')
self.assertArrayNear(self.evaluate(mre_obj.normalizer), [1, 3], 1e-1)
mre_obj2 = metrics.MeanRelativeError.from_config(mre_obj.get_config())
self.assertEqual(mre_obj2.name, 'mre')
self.assertArrayNear(self.evaluate(mre_obj2.normalizer), [1, 3], 1e-1)
def test_unweighted(self):
np_y_pred = np.asarray([2, 4, 6, 8], dtype=np.float32)
np_y_true = np.asarray([1, 3, 2, 3], dtype=np.float32)
expected_error = np.mean(
np.divide(np.absolute(np_y_pred - np_y_true), np_y_true))
y_pred = tf.constant(np_y_pred, shape=(1, 4), dtype=tf.float32)
y_true = tf.constant(np_y_true, shape=(1, 4))
mre_obj = metrics.MeanRelativeError(normalizer=y_true)
self.evaluate(tf.compat.v1.variables_initializer(mre_obj.variables))
result = mre_obj(y_true, y_pred)
self.assertAllClose(self.evaluate(result), expected_error, atol=1e-3)
def test_weighted(self):
np_y_pred = np.asarray([2, 4, 6, 8], dtype=np.float32)
np_y_true = np.asarray([1, 3, 2, 3], dtype=np.float32)
sample_weight = np.asarray([0.2, 0.3, 0.5, 0], dtype=np.float32)
rel_errors = np.divide(np.absolute(np_y_pred - np_y_true), np_y_true)
expected_error = np.sum(rel_errors * sample_weight)
y_pred = tf.constant(np_y_pred, dtype=tf.float32)
y_true = tf.constant(np_y_true)
mre_obj = metrics.MeanRelativeError(normalizer=y_true)
self.evaluate(tf.compat.v1.variables_initializer(mre_obj.variables))
result = mre_obj(
y_true, y_pred, sample_weight=tf.constant(sample_weight))
self.assertAllClose(self.evaluate(result), expected_error, atol=1e-3)
def test_zero_normalizer(self):
y_pred = tf.constant([2, 4], dtype=tf.float32)
y_true = tf.constant([1, 3])
mre_obj = metrics.MeanRelativeError(normalizer=tf.zeros_like(y_true))
self.evaluate(tf.compat.v1.variables_initializer(mre_obj.variables))
result = mre_obj(y_true, y_pred)
self.assertEqual(self.evaluate(result), 0)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class IoUTest(tf.test.TestCase):
def test_config(self):
obj = metrics.IoU(
num_classes=2, target_class_ids=[1, 0], name='iou_class_1_0')
self.assertEqual(obj.name, 'iou_class_1_0')
self.assertEqual(obj.num_classes, 2)
self.assertEqual(obj.target_class_ids, [1, 0])
obj2 = metrics.IoU.from_config(obj.get_config())
self.assertEqual(obj2.name, 'iou_class_1_0')
self.assertEqual(obj2.num_classes, 2)
self.assertEqual(obj2.target_class_ids, [1, 0])
def test_unweighted(self):
y_pred = [0, 1, 0, 1]
y_true = [0, 0, 1, 1]
obj = metrics.IoU(num_classes=2, target_class_ids=[0, 1])
self.evaluate(tf.compat.v1.variables_initializer(obj.variables))
result = obj(y_true, y_pred)
# cm = [[1, 1],
# [1, 1]]
# sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (1 / (2 + 2 - 1) + 1 / (2 + 2 - 1)) / 2
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
def test_weighted(self):
y_pred = tf.constant([0, 1, 0, 1], dtype=tf.float32)
y_true = tf.constant([0, 0, 1, 1])
sample_weight = tf.constant([0.2, 0.3, 0.4, 0.1])
obj = metrics.IoU(num_classes=2, target_class_ids=[1, 0])
self.evaluate(tf.compat.v1.variables_initializer(obj.variables))
result = obj(y_true, y_pred, sample_weight=sample_weight)
# cm = [[0.2, 0.3],
# [0.4, 0.1]]
# sum_row = [0.6, 0.4], sum_col = [0.5, 0.5], true_positives = [0.2, 0.1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (0.1 / (0.4 + 0.5 - 0.1) + 0.2 / (0.6 + 0.5 - 0.2)) / 2
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
def test_multi_dim_input(self):
y_pred = tf.constant([[0, 1], [0, 1]], dtype=tf.float32)
y_true = tf.constant([[0, 0], [1, 1]])
sample_weight = tf.constant([[0.2, 0.3], [0.4, 0.1]])
obj = metrics.IoU(num_classes=2, target_class_ids=[0, 1])
self.evaluate(tf.compat.v1.variables_initializer(obj.variables))
result = obj(y_true, y_pred, sample_weight=sample_weight)
# cm = [[0.2, 0.3],
# [0.4, 0.1]]
# sum_row = [0.6, 0.4], sum_col = [0.5, 0.5], true_positives = [0.2, 0.1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (0.2 / (0.6 + 0.5 - 0.2) + 0.1 / (0.4 + 0.5 - 0.1)) / 2
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
def test_zero_valid_entries(self):
obj = metrics.IoU(num_classes=2, target_class_ids=[0, 1])
self.evaluate(tf.compat.v1.variables_initializer(obj.variables))
self.assertAllClose(
self.evaluate(obj.result()), 0, atol=1e-3)
def test_zero_and_non_zero_entries(self):
y_pred = tf.constant([1], dtype=tf.float32)
y_true = tf.constant([1])
obj = metrics.IoU(num_classes=2, target_class_ids=[0, 1])
self.evaluate(tf.compat.v1.variables_initializer(obj.variables))
result = obj(y_true, y_pred)
# cm = [[0, 0],
# [0, 1]]
# sum_row = [0, 1], sum_col = [0, 1], true_positives = [0, 1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (1 / (1 + 1 - 1)) / 1
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class BinaryIoUTest(tf.test.TestCase):
def test_config(self):
obj = metrics.BinaryIoU(
target_class_ids=[1, 0], threshold=0.1, name='iou_class_1_0')
self.assertEqual(obj.name, 'iou_class_1_0')
self.assertAlmostEqual(obj.threshold, 0.1)
self.assertEqual(obj.target_class_ids, [1, 0])
obj2 = metrics.BinaryIoU.from_config(obj.get_config())
    self.assertEqual(obj2.name, 'iou_class_1_0')
    self.assertAlmostEqual(obj2.threshold, 0.1)
    self.assertEqual(obj2.target_class_ids, [1, 0])
def test_different_thresholds_weighted(self):
y_true = [0, 1, 0, 1]
y_pred = [0.1, 0.2, 0.4, 0.7]
sample_weight = tf.constant([0.2, 0.3, 0.4, 0.1])
# with threshold = 0.3, y_pred will be converted to [0, 0, 1, 1]
# cm = [[0.2, 0.4],
# [0.3, 0.1]]
# sum_row = [0.6, 0.4], sum_col = [0.5, 0.5], true_positives = [0.2, 0.1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (0.2 / (0.6 + 0.5 - 0.2) + 0.1 / (0.4 + 0.5 - 0.1)) / 2
obj = metrics.BinaryIoU(target_class_ids=[0, 1], threshold=0.3)
self.evaluate(tf.compat.v1.variables_initializer(obj.variables))
result = obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
sample_weight = tf.constant([0.1, 0.2, 0.4, 0.3])
# with threshold = 0.5, y_pred will be converted to [0, 0, 0, 1]
# cm = [[0.1+0.4, 0],
# [0.2, 0.3]]
# sum_row = [0.5, 0.5], sum_col = [0.7, 0.3], true_positives = [0.5, 0.3]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (0.5 / (0.5 + 0.7 - 0.5) + 0.3 / (0.5 + 0.3 - 0.3)) / 2
obj = metrics.BinaryIoU(target_class_ids=[0, 1], threshold=0.5)
self.evaluate(tf.compat.v1.variables_initializer(obj.variables))
result = obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
def test_different_thresholds_unweighted(self):
y_true = [0, 1, 0, 1]
y_pred = [0.1, 0.2, 0.4, 0.7]
# with threshold = 0.3, y_pred will be converted to [0, 0, 1, 1]
# cm = [[1, 1],
# [1, 1]]
# sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (1 / (2 + 2 - 1) + 1 / (2 + 2 - 1)) / 2
obj = metrics.BinaryIoU(target_class_ids=[0, 1], threshold=0.3)
self.evaluate(tf.compat.v1.variables_initializer(obj.variables))
result = obj(y_true, y_pred)
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
# with threshold = 0.5, y_pred will be converted to [0, 0, 0, 1]
# cm = [[2, 0],
# [1, 1]]
# sum_row = [2, 2], sum_col = [3, 1], true_positives = [2, 1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (2 / (2 + 3 - 2) + 1 / (2 + 1 - 1)) / 2
obj = metrics.BinaryIoU(target_class_ids=[0, 1], threshold=0.5)
self.evaluate(tf.compat.v1.variables_initializer(obj.variables))
result = obj(y_true, y_pred)
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
def test_multi_dim_input(self):
y_true = tf.constant([[0, 1], [0, 1]], dtype=tf.float32)
y_pred = tf.constant([[0.1, 0.7], [0.9, 0.3]])
threshold = 0.4 # y_pred will become [[0, 1], [1, 0]]
sample_weight = tf.constant([[0.2, 0.3], [0.4, 0.1]])
# cm = [[0.2, 0.4],
# [0.1, 0.3]]
# sum_row = [0.6, 0.4], sum_col = [0.3, 0.7], true_positives = [0.2, 0.3]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (0.2 / (0.6 + 0.3 - 0.2) + 0.3 / (0.4 + 0.7 - 0.3)) / 2
obj = metrics.BinaryIoU(target_class_ids=[0, 1], threshold=threshold)
self.evaluate(tf.compat.v1.variables_initializer(obj.variables))
result = obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
def test_zero_valid_entries(self):
obj = metrics.BinaryIoU(target_class_ids=[0, 1])
self.evaluate(tf.compat.v1.variables_initializer(obj.variables))
self.assertAllClose(
self.evaluate(obj.result()), 0, atol=1e-3)
def test_zero_and_non_zero_entries(self):
y_pred = tf.constant([0.6], dtype=tf.float32)
threshold = 0.5
y_true = tf.constant([1])
obj = metrics.BinaryIoU(target_class_ids=[0, 1], threshold=threshold)
self.evaluate(tf.compat.v1.variables_initializer(obj.variables))
result = obj(y_true, y_pred)
# cm = [[0, 0],
# [0, 1]]
# sum_row = [0, 1], sum_col = [0, 1], true_positives = [0, 1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = 1 / (1 + 1 - 1)
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class MeanIoUTest(tf.test.TestCase):
def test_config(self):
m_obj = metrics.MeanIoU(num_classes=2, name='mean_iou')
self.assertEqual(m_obj.name, 'mean_iou')
self.assertEqual(m_obj.num_classes, 2)
m_obj2 = metrics.MeanIoU.from_config(m_obj.get_config())
self.assertEqual(m_obj2.name, 'mean_iou')
self.assertEqual(m_obj2.num_classes, 2)
def test_unweighted(self):
y_pred = [0, 1, 0, 1]
y_true = [0, 0, 1, 1]
m_obj = metrics.MeanIoU(num_classes=2)
self.evaluate(tf.compat.v1.variables_initializer(m_obj.variables))
result = m_obj(y_true, y_pred)
# cm = [[1, 1],
# [1, 1]]
# sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (1 / (2 + 2 - 1) + 1 / (2 + 2 - 1)) / 2
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
def test_weighted(self):
y_pred = tf.constant([0, 1, 0, 1], dtype=tf.float32)
y_true = tf.constant([0, 0, 1, 1])
sample_weight = tf.constant([0.2, 0.3, 0.4, 0.1])
m_obj = metrics.MeanIoU(num_classes=2)
self.evaluate(tf.compat.v1.variables_initializer(m_obj.variables))
result = m_obj(y_true, y_pred, sample_weight=sample_weight)
# cm = [[0.2, 0.3],
# [0.4, 0.1]]
# sum_row = [0.6, 0.4], sum_col = [0.5, 0.5], true_positives = [0.2, 0.1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (0.2 / (0.6 + 0.5 - 0.2) + 0.1 / (0.4 + 0.5 - 0.1)) / 2
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
def test_multi_dim_input(self):
y_pred = tf.constant([[0, 1], [0, 1]], dtype=tf.float32)
y_true = tf.constant([[0, 0], [1, 1]])
sample_weight = tf.constant([[0.2, 0.3], [0.4, 0.1]])
m_obj = metrics.MeanIoU(num_classes=2)
self.evaluate(tf.compat.v1.variables_initializer(m_obj.variables))
result = m_obj(y_true, y_pred, sample_weight=sample_weight)
# cm = [[0.2, 0.3],
# [0.4, 0.1]]
# sum_row = [0.6, 0.4], sum_col = [0.5, 0.5], true_positives = [0.2, 0.1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (0.2 / (0.6 + 0.5 - 0.2) + 0.1 / (0.4 + 0.5 - 0.1)) / 2
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
def test_zero_valid_entries(self):
m_obj = metrics.MeanIoU(num_classes=2)
self.evaluate(tf.compat.v1.variables_initializer(m_obj.variables))
self.assertAllClose(self.evaluate(m_obj.result()), 0, atol=1e-3)
def test_zero_and_non_zero_entries(self):
y_pred = tf.constant([1], dtype=tf.float32)
y_true = tf.constant([1])
m_obj = metrics.MeanIoU(num_classes=2)
self.evaluate(tf.compat.v1.variables_initializer(m_obj.variables))
result = m_obj(y_true, y_pred)
# cm = [[0, 0],
# [0, 1]]
# sum_row = [0, 1], sum_col = [0, 1], true_positives = [0, 1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (0 + 1 / (1 + 1 - 1)) / 1
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class OneHotIoUTest(tf.test.TestCase):
def test_unweighted(self):
y_true = tf.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0], [1, 0, 0]])
# y_true will be converted to [2, 0, 1, 0]
y_pred = tf.constant([[0.2, 0.3, 0.5], [0.1, 0.2, 0.7], [0.5, 0.3, 0.1],
[0.1, 0.4, 0.5]])
# y_pred will be converted to [2, 2, 0, 2]
# cm = [[0, 0, 2],
# [1, 0, 0],
# [0, 0, 1]
# sum_row = [1, 0, 3], sum_col = [2, 1, 1], true_positives = [0, 0, 1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (0 / (1 + 2 - 0) + 1 / (3 + 1 - 1)) / 2
obj = metrics.OneHotIoU(num_classes=3, target_class_ids=[0, 2])
self.evaluate(tf.compat.v1.variables_initializer(obj.variables))
result = obj(y_true, y_pred)
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
def test_weighted(self):
y_true = tf.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0], [1, 0, 0]])
# y_true will be converted to [2, 0, 1, 0]
y_pred = tf.constant([[0.2, 0.3, 0.5], [0.1, 0.2, 0.7], [0.5, 0.3, 0.1],
[0.1, 0.4, 0.5]])
# y_pred will be converted to [2, 2, 0, 2]
sample_weight = [0.1, 0.2, 0.3, 0.4]
# cm = [[0, 0, 0.2+0.4],
# [0.3, 0, 0],
# [0, 0, 0.1]]
# sum_row = [0.3, 0, 0.7], sum_col = [0.6, 0.3, 0.1]
# true_positives = [0, 0, 0.1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (0 / (0.3 + 0.6 - 0) + 0.1 / (0.7 + 0.1 - 0.1)) / 2
obj = metrics.OneHotIoU(num_classes=3, target_class_ids=[0, 2])
self.evaluate(tf.compat.v1.variables_initializer(obj.variables))
result = obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class OneHotMeanIoUTest(tf.test.TestCase):
def test_unweighted(self):
y_true = tf.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0], [1, 0, 0]])
# y_true will be converted to [2, 0, 1, 0]
y_pred = tf.constant([[0.2, 0.3, 0.5], [0.1, 0.2, 0.7], [0.5, 0.3, 0.1],
[0.1, 0.4, 0.5]])
# y_pred will be converted to [2, 2, 0, 2]
# cm = [[0, 0, 2],
# [1, 0, 0],
# [0, 0, 1]
# sum_row = [1, 0, 3], sum_col = [2, 1, 1], true_positives = [0, 0, 1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (0 + 0 + 1 / (3 + 1 - 1)) / 3
obj = metrics.OneHotMeanIoU(num_classes=3)
self.evaluate(tf.compat.v1.variables_initializer(obj.variables))
result = obj(y_true, y_pred)
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
def test_weighted(self):
y_true = tf.constant([
[0, 0, 1],
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
])
# y_true will be converted to [2, 0, 1, 0, 0]
y_pred = tf.constant([
[0.2, 0.3, 0.5],
[0.1, 0.2, 0.7],
[0.5, 0.3, 0.1],
[0.1, 0.4, 0.5],
[0.6, 0.2, 0.2],
])
# y_pred will be converted to [2, 2, 0, 2, 0]
sample_weight = [0.1, 0.2, 0.3, 0.3, 0.1]
# cm = [[0.1, 0, 0.2+0.3],
# [0.3, 0, 0],
# [0, 0, 0.1]]
# sum_row = [0.4, 0, 0.6], sum_col = [0.6, 0.3, 0.1]
# true_positives = [0.1, 0, 0.1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (0.1 / (0.4 + 0.6 - 0.1) + 0 + 0.1 /
(0.6 + 0.1 - 0.1)) / 3
obj = metrics.OneHotMeanIoU(num_classes=3)
self.evaluate(tf.compat.v1.variables_initializer(obj.variables))
result = obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
class MeanTensorTest(tf.test.TestCase, parameterized.TestCase):
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_config(self):
with self.test_session():
m = metrics.MeanTensor(name='mean_by_element')
# check config
self.assertEqual(m.name, 'mean_by_element')
self.assertTrue(m.stateful)
self.assertEqual(m.dtype, tf.float32)
self.assertEmpty(m.variables)
with self.assertRaisesRegex(ValueError, 'does not have any value yet'):
m.result()
self.evaluate(m([[3], [5], [3]]))
self.assertAllEqual(m._shape, [3, 1])
m2 = metrics.MeanTensor.from_config(m.get_config())
self.assertEqual(m2.name, 'mean_by_element')
self.assertTrue(m2.stateful)
self.assertEqual(m2.dtype, tf.float32)
self.assertEmpty(m2.variables)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_unweighted(self):
with self.test_session():
m = metrics.MeanTensor(dtype=tf.float64)
# check __call__()
self.assertAllClose(self.evaluate(m([100, 40])), [100, 40])
self.assertAllClose(self.evaluate(m.total), [100, 40])
self.assertAllClose(self.evaluate(m.count), [1, 1])
# check update_state() and result() + state accumulation + tensor input
update_op = m.update_state([
tf.convert_to_tensor(1),
tf.convert_to_tensor(5)
])
self.evaluate(update_op)
self.assertAllClose(self.evaluate(m.result()), [50.5, 22.5])
self.assertAllClose(self.evaluate(m.total), [101, 45])
self.assertAllClose(self.evaluate(m.count), [2, 2])
# check reset_state()
m.reset_state()
self.assertAllClose(self.evaluate(m.total), [0, 0])
self.assertAllClose(self.evaluate(m.count), [0, 0])
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_weighted(self):
with self.test_session():
m = metrics.MeanTensor(dtype=tf.float64)
self.assertEqual(m.dtype, tf.float64)
# check scalar weight
result_t = m([100, 30], sample_weight=0.5)
self.assertAllClose(self.evaluate(result_t), [100, 30])
self.assertAllClose(self.evaluate(m.total), [50, 15])
self.assertAllClose(self.evaluate(m.count), [0.5, 0.5])
# check weights not scalar and weights rank matches values rank
result_t = m([1, 5], sample_weight=[1, 0.2])
result = self.evaluate(result_t)
self.assertAllClose(result, [51 / 1.5, 16 / 0.7], 2)
self.assertAllClose(self.evaluate(m.total), [51, 16])
self.assertAllClose(self.evaluate(m.count), [1.5, 0.7])
# check weights broadcast
result_t = m([1, 2], sample_weight=0.5)
self.assertAllClose(self.evaluate(result_t), [51.5 / 2, 17 / 1.2])
self.assertAllClose(self.evaluate(m.total), [51.5, 17])
self.assertAllClose(self.evaluate(m.count), [2, 1.2])
# check weights squeeze
result_t = m([1, 5], sample_weight=[[1], [0.2]])
self.assertAllClose(self.evaluate(result_t), [52.5 / 3, 18 / 1.4])
self.assertAllClose(self.evaluate(m.total), [52.5, 18])
self.assertAllClose(self.evaluate(m.count), [3, 1.4])
# check weights expand
m = metrics.MeanTensor(dtype=tf.float64)
self.evaluate(tf.compat.v1.variables_initializer(m.variables))
result_t = m([[1], [5]], sample_weight=[1, 0.2])
self.assertAllClose(self.evaluate(result_t), [[1], [5]])
self.assertAllClose(self.evaluate(m.total), [[1], [1]])
self.assertAllClose(self.evaluate(m.count), [[1], [0.2]])
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_invalid_value_shape(self):
m = metrics.MeanTensor(dtype=tf.float64)
m([1])
with self.assertRaisesRegex(
ValueError, 'MeanTensor input values must always have the same shape'):
m([1, 5])
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_build_in_tf_function(self):
"""Ensure that variables are created correctly in a tf function."""
m = metrics.MeanTensor(dtype=tf.float64)
@tf.function
def call_metric(x):
return m(x)
with self.test_session():
self.assertAllClose(self.evaluate(call_metric([100, 40])), [100, 40])
self.assertAllClose(self.evaluate(m.total), [100, 40])
self.assertAllClose(self.evaluate(m.count), [1, 1])
self.assertAllClose(self.evaluate(call_metric([20, 2])), [60, 21])
@combinations.generate(combinations.combine(mode=['eager']))
def test_in_keras_model(self):
class ModelWithMetric(Model):
def __init__(self):
super(ModelWithMetric, self).__init__()
self.dense1 = layers.Dense(
3, activation='relu', kernel_initializer='ones')
self.dense2 = layers.Dense(
1, activation='sigmoid', kernel_initializer='ones')
self.mean_tensor = metrics.MeanTensor()
def call(self, x):
x = self.dense1(x)
x = self.dense2(x)
self.mean_tensor(self.dense1.kernel)
return x
model = ModelWithMetric()
model.compile(
loss='mae',
optimizer='rmsprop',
run_eagerly=True)
x = np.ones((100, 4))
y = np.zeros((100, 1))
model.evaluate(x, y, batch_size=50)
self.assertAllClose(self.evaluate(model.mean_tensor.result()),
np.ones((4, 3)))
self.assertAllClose(self.evaluate(model.mean_tensor.total),
np.full((4, 3), 2))
self.assertAllClose(self.evaluate(model.mean_tensor.count),
np.full((4, 3), 2))
model.evaluate(x, y, batch_size=25)
self.assertAllClose(self.evaluate(model.mean_tensor.result()),
np.ones((4, 3)))
self.assertAllClose(self.evaluate(model.mean_tensor.total),
np.full((4, 3), 4))
self.assertAllClose(self.evaluate(model.mean_tensor.count),
np.full((4, 3), 4))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class BinaryCrossentropyTest(tf.test.TestCase):
def test_config(self):
bce_obj = metrics.BinaryCrossentropy(
name='bce', dtype=tf.int32, label_smoothing=0.2)
self.assertEqual(bce_obj.name, 'bce')
self.assertEqual(bce_obj._dtype, tf.int32)
old_config = bce_obj.get_config()
self.assertAllClose(old_config['label_smoothing'], 0.2, 1e-3)
# Check save and restore config
bce_obj2 = metrics.BinaryCrossentropy.from_config(old_config)
self.assertEqual(bce_obj2.name, 'bce')
self.assertEqual(bce_obj2._dtype, tf.int32)
new_config = bce_obj2.get_config()
self.assertDictEqual(old_config, new_config)
def test_unweighted(self):
bce_obj = metrics.BinaryCrossentropy()
self.evaluate(tf.compat.v1.variables_initializer(bce_obj.variables))
y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])
y_pred = np.asarray([1, 1, 1, 0], dtype=np.float32).reshape([2, 2])
result = bce_obj(y_true, y_pred)
# EPSILON = 1e-7, y = y_true, y` = y_pred, Y_MAX = 0.9999999
# y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
# y` = [Y_MAX, Y_MAX, Y_MAX, EPSILON]
# Metric = -(y log(y` + EPSILON) + (1 - y) log(1 - y` + EPSILON))
# = [-log(Y_MAX + EPSILON), -log(1 - Y_MAX + EPSILON),
# -log(Y_MAX + EPSILON), -log(1)]
# = [(0 + 15.33) / 2, (0 + 0) / 2]
# Reduced metric = 7.665 / 2
self.assertAllClose(self.evaluate(result), 3.833, atol=1e-3)
def test_unweighted_with_logits(self):
bce_obj = metrics.BinaryCrossentropy(from_logits=True)
self.evaluate(tf.compat.v1.variables_initializer(bce_obj.variables))
y_true = tf.constant([[1, 0, 1], [0, 1, 1]])
y_pred = tf.constant([[100.0, -100.0, 100.0],
[100.0, 100.0, -100.0]])
result = bce_obj(y_true, y_pred)
# Metric = max(x, 0) - x * z + log(1 + exp(-abs(x)))
# (where x = logits and z = y_true)
# = [((100 - 100 * 1 + log(1 + exp(-100))) +
# (0 + 100 * 0 + log(1 + exp(-100))) +
# (100 - 100 * 1 + log(1 + exp(-100))),
# ((100 - 100 * 0 + log(1 + exp(-100))) +
# (100 - 100 * 1 + log(1 + exp(-100))) +
# (0 + 100 * 1 + log(1 + exp(-100))))]
# = [(0 + 0 + 0) / 3, 200 / 3]
# Reduced metric = (0 + 66.666) / 2
self.assertAllClose(self.evaluate(result), 33.333, atol=1e-3)
def test_weighted(self):
bce_obj = metrics.BinaryCrossentropy()
self.evaluate(tf.compat.v1.variables_initializer(bce_obj.variables))
y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])
y_pred = np.asarray([1, 1, 1, 0], dtype=np.float32).reshape([2, 2])
sample_weight = tf.constant([1.5, 2.])
result = bce_obj(y_true, y_pred, sample_weight=sample_weight)
# EPSILON = 1e-7, y = y_true, y` = y_pred, Y_MAX = 0.9999999
# y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
# y` = [Y_MAX, Y_MAX, Y_MAX, EPSILON]
# Metric = -(y log(y` + EPSILON) + (1 - y) log(1 - y` + EPSILON))
# = [-log(Y_MAX + EPSILON), -log(1 - Y_MAX + EPSILON),
# -log(Y_MAX + EPSILON), -log(1)]
# = [(0 + 15.33) / 2, (0 + 0) / 2]
# Weighted metric = [7.665 * 1.5, 0]
# Reduced metric = 7.665 * 1.5 / (1.5 + 2)
self.assertAllClose(self.evaluate(result), 3.285, atol=1e-3)
def test_weighted_from_logits(self):
bce_obj = metrics.BinaryCrossentropy(from_logits=True)
self.evaluate(tf.compat.v1.variables_initializer(bce_obj.variables))
y_true = tf.constant([[1, 0, 1], [0, 1, 1]])
y_pred = tf.constant([[100.0, -100.0, 100.0],
[100.0, 100.0, -100.0]])
sample_weight = tf.constant([2., 2.5])
result = bce_obj(y_true, y_pred, sample_weight=sample_weight)
# Metric = max(x, 0) - x * z + log(1 + exp(-abs(x)))
# (where x = logits and z = y_true)
# = [(0 + 0 + 0) / 3, 200 / 3]
# Weighted metric = [0, 66.666 * 2.5]
# Reduced metric = 66.666 * 2.5 / (2 + 2.5)
self.assertAllClose(self.evaluate(result), 37.037, atol=1e-3)
def test_label_smoothing(self):
logits = tf.constant(((100., -100., -100.)))
y_true = tf.constant(((1, 0, 1)))
label_smoothing = 0.1
# Metric: max(x, 0) - x * z + log(1 + exp(-abs(x)))
# (where x = logits and z = y_true)
# Label smoothing: z' = z * (1 - L) + 0.5L
# After label smoothing, label 1 becomes 1 - 0.5L
# label 0 becomes 0.5L
# Applying the above two fns to the given input:
# (100 - 100 * (1 - 0.5 L) + 0 +
# 0 + 100 * (0.5 L) + 0 +
# 0 + 100 * (1 - 0.5 L) + 0) * (1/3)
# = (100 + 50L) * 1/3
bce_obj = metrics.BinaryCrossentropy(
from_logits=True, label_smoothing=label_smoothing)
self.evaluate(tf.compat.v1.variables_initializer(bce_obj.variables))
result = bce_obj(y_true, logits)
expected_value = (100.0 + 50.0 * label_smoothing) / 3.0
self.assertAllClose(expected_value, self.evaluate(result), atol=1e-3)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class CategoricalCrossentropyTest(tf.test.TestCase):
def test_config(self):
cce_obj = metrics.CategoricalCrossentropy(
name='cce', dtype=tf.int32, label_smoothing=0.2)
self.assertEqual(cce_obj.name, 'cce')
self.assertEqual(cce_obj._dtype, tf.int32)
old_config = cce_obj.get_config()
self.assertAllClose(old_config['label_smoothing'], 0.2, 1e-3)
# Check save and restore config
cce_obj2 = metrics.CategoricalCrossentropy.from_config(old_config)
self.assertEqual(cce_obj2.name, 'cce')
self.assertEqual(cce_obj2._dtype, tf.int32)
new_config = cce_obj2.get_config()
self.assertDictEqual(old_config, new_config)
def test_unweighted(self):
cce_obj = metrics.CategoricalCrossentropy()
self.evaluate(tf.compat.v1.variables_initializer(cce_obj.variables))
y_true = np.asarray([[0, 1, 0], [0, 0, 1]])
y_pred = np.asarray([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
result = cce_obj(y_true, y_pred)
# EPSILON = 1e-7, y = y_true, y` = y_pred
# y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
# y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
# Metric = -sum(y * log(y'), axis = -1)
# = -((log 0.95), (log 0.1))
# = [0.051, 2.302]
# Reduced metric = (0.051 + 2.302) / 2
self.assertAllClose(self.evaluate(result), 1.176, atol=1e-3)
def test_unweighted_from_logits(self):
cce_obj = metrics.CategoricalCrossentropy(from_logits=True)
self.evaluate(tf.compat.v1.variables_initializer(cce_obj.variables))
y_true = np.asarray([[0, 1, 0], [0, 0, 1]])
logits = np.asarray([[1, 9, 0], [1, 8, 1]], dtype=np.float32)
result = cce_obj(y_true, logits)
# softmax = exp(logits) / sum(exp(logits), axis=-1)
# xent = -sum(labels * log(softmax), 1)
# exp(logits) = [[2.718, 8103.084, 1], [2.718, 2980.958, 2.718]]
# sum(exp(logits), axis=-1) = [8106.802, 2986.394]
# softmax = [[0.00033, 0.99954, 0.00012], [0.00091, 0.99817, 0.00091]]
# log(softmax) = [[-8.00045, -0.00045, -9.00045],
# [-7.00182, -0.00182, -7.00182]]
# labels * log(softmax) = [[0, -0.00045, 0], [0, 0, -7.00182]]
# xent = [0.00045, 7.00182]
# Reduced xent = (0.00045 + 7.00182) / 2
self.assertAllClose(self.evaluate(result), 3.5011, atol=1e-3)
def test_weighted(self):
cce_obj = metrics.CategoricalCrossentropy()
self.evaluate(tf.compat.v1.variables_initializer(cce_obj.variables))
y_true = np.asarray([[0, 1, 0], [0, 0, 1]])
y_pred = np.asarray([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
sample_weight = tf.constant([1.5, 2.])
result = cce_obj(y_true, y_pred, sample_weight=sample_weight)
# EPSILON = 1e-7, y = y_true, y` = y_pred
# y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
# y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
# Metric = -sum(y * log(y'), axis = -1)
# = -((log 0.95), (log 0.1))
# = [0.051, 2.302]
# Weighted metric = [0.051 * 1.5, 2.302 * 2.]
# Reduced metric = (0.051 * 1.5 + 2.302 * 2.) / 3.5
self.assertAllClose(self.evaluate(result), 1.338, atol=1e-3)
def test_weighted_from_logits(self):
cce_obj = metrics.CategoricalCrossentropy(from_logits=True)
self.evaluate(tf.compat.v1.variables_initializer(cce_obj.variables))
y_true = np.asarray([[0, 1, 0], [0, 0, 1]])
logits = np.asarray([[1, 9, 0], [1, 8, 1]], dtype=np.float32)
sample_weight = tf.constant([1.5, 2.])
result = cce_obj(y_true, logits, sample_weight=sample_weight)
# softmax = exp(logits) / sum(exp(logits), axis=-1)
# xent = -sum(labels * log(softmax), 1)
# xent = [0.00045, 7.00182]
# weighted xent = [0.000675, 14.00364]
# Reduced xent = (0.000675 + 14.00364) / (1.5 + 2)
self.assertAllClose(self.evaluate(result), 4.0012, atol=1e-3)
def test_label_smoothing(self):
y_true = np.asarray([[0, 1, 0], [0, 0, 1]])
logits = np.asarray([[1, 9, 0], [1, 8, 1]], dtype=np.float32)
label_smoothing = 0.1
# Label smoothing: z' = z * (1 - L) + L/n,
# where L = label smoothing value and n = num classes
# Label value 1 becomes: 1 - L + L/n
# Label value 0 becomes: L/n
# y_true with label_smoothing = [[0.0333, 0.9333, 0.0333],
# [0.0333, 0.0333, 0.9333]]
# softmax = exp(logits) / sum(exp(logits), axis=-1)
# xent = -sum(labels * log(softmax), 1)
# log(softmax) = [[-8.00045, -0.00045, -9.00045],
# [-7.00182, -0.00182, -7.00182]]
# labels * log(softmax) = [[-0.26641, -0.00042, -0.29971],
# [-0.23316, -0.00006, -6.53479]]
# xent = [0.56654, 6.76801]
# Reduced xent = (0.56654 + 6.76801) / 2
cce_obj = metrics.CategoricalCrossentropy(
from_logits=True, label_smoothing=label_smoothing)
self.evaluate(tf.compat.v1.variables_initializer(cce_obj.variables))
loss = cce_obj(y_true, logits)
self.assertAllClose(self.evaluate(loss), 3.667, atol=1e-3)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class SparseCategoricalCrossentropyTest(tf.test.TestCase):
def test_config(self):
scce_obj = metrics.SparseCategoricalCrossentropy(
name='scce', dtype=tf.int32)
self.assertEqual(scce_obj.name, 'scce')
self.assertEqual(scce_obj.dtype, tf.int32)
old_config = scce_obj.get_config()
self.assertDictEqual(old_config, json.loads(json.dumps(old_config)))
# Check save and restore config
scce_obj2 = metrics.SparseCategoricalCrossentropy.from_config(old_config)
self.assertEqual(scce_obj2.name, 'scce')
self.assertEqual(scce_obj2.dtype, tf.int32)
new_config = scce_obj2.get_config()
self.assertDictEqual(old_config, new_config)
def test_unweighted(self):
scce_obj = metrics.SparseCategoricalCrossentropy()
self.evaluate(tf.compat.v1.variables_initializer(scce_obj.variables))
y_true = np.asarray([1, 2])
y_pred = np.asarray([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
result = scce_obj(y_true, y_pred)
# EPSILON = 1e-7, y = y_true, y` = y_pred
# y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
# y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
# logits = log(y`) = [[-2.9957, -0.0513, -16.1181],
# [-2.3026, -0.2231, -2.3026]]
# softmax = exp(logits) / sum(exp(logits), axis=-1)
# y = one_hot(y) = [[0, 1, 0], [0, 0, 1]]
# xent = -sum(y * log(softmax), 1)
# exp(logits) = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
# sum(exp(logits), axis=-1) = [1, 1]
# softmax = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
# log(softmax) = [[-2.9957, -0.0513, -16.1181],
# [-2.3026, -0.2231, -2.3026]]
# y * log(softmax) = [[0, -0.0513, 0], [0, 0, -2.3026]]
# xent = [0.0513, 2.3026]
# Reduced xent = (0.0513 + 2.3026) / 2
self.assertAllClose(self.evaluate(result), 1.176, atol=1e-3)
def test_unweighted_from_logits(self):
scce_obj = metrics.SparseCategoricalCrossentropy(from_logits=True)
self.evaluate(tf.compat.v1.variables_initializer(scce_obj.variables))
y_true = np.asarray([1, 2])
logits = np.asarray([[1, 9, 0], [1, 8, 1]], dtype=np.float32)
result = scce_obj(y_true, logits)
# softmax = exp(logits) / sum(exp(logits), axis=-1)
# y_true = one_hot(y_true) = [[0, 1, 0], [0, 0, 1]]
# xent = -sum(y_true * log(softmax), 1)
# exp(logits) = [[2.718, 8103.084, 1], [2.718, 2980.958, 2.718]]
# sum(exp(logits), axis=-1) = [8106.802, 2986.394]
# softmax = [[0.00033, 0.99954, 0.00012], [0.00091, 0.99817, 0.00091]]
# log(softmax) = [[-8.00045, -0.00045, -9.00045],
# [-7.00182, -0.00182, -7.00182]]
# y_true * log(softmax) = [[0, -0.00045, 0], [0, 0, -7.00182]]
# xent = [0.00045, 7.00182]
# Reduced xent = (0.00045 + 7.00182) / 2
self.assertAllClose(self.evaluate(result), 3.5011, atol=1e-3)
def test_weighted(self):
scce_obj = metrics.SparseCategoricalCrossentropy()
self.evaluate(tf.compat.v1.variables_initializer(scce_obj.variables))
y_true = np.asarray([1, 2])
y_pred = np.asarray([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
sample_weight = tf.constant([1.5, 2.])
result = scce_obj(y_true, y_pred, sample_weight=sample_weight)
# EPSILON = 1e-7, y = y_true, y` = y_pred
# y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
# y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
# logits = log(y`) = [[-2.9957, -0.0513, -16.1181],
# [-2.3026, -0.2231, -2.3026]]
# softmax = exp(logits) / sum(exp(logits), axis=-1)
# y = one_hot(y) = [[0, 1, 0], [0, 0, 1]]
# xent = -sum(y * log(softmax), 1)
# exp(logits) = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
# sum(exp(logits), axis=-1) = [1, 1]
# softmax = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
# log(softmax) = [[-2.9957, -0.0513, -16.1181],
# [-2.3026, -0.2231, -2.3026]]
# y * log(softmax) = [[0, -0.0513, 0], [0, 0, -2.3026]]
# xent = [0.0513, 2.3026]
# Weighted xent = [0.051 * 1.5, 2.302 * 2.]
# Reduced xent = (0.051 * 1.5 + 2.302 * 2.) / 3.5
self.assertAllClose(self.evaluate(result), 1.338, atol=1e-3)
def test_weighted_from_logits(self):
scce_obj = metrics.SparseCategoricalCrossentropy(from_logits=True)
self.evaluate(tf.compat.v1.variables_initializer(scce_obj.variables))
y_true = np.asarray([1, 2])
logits = np.asarray([[1, 9, 0], [1, 8, 1]], dtype=np.float32)
sample_weight = tf.constant([1.5, 2.])
result = scce_obj(y_true, logits, sample_weight=sample_weight)
# softmax = exp(logits) / sum(exp(logits), axis=-1)
# y_true = one_hot(y_true) = [[0, 1, 0], [0, 0, 1]]
# xent = -sum(y_true * log(softmax), 1)
# xent = [0.00045, 7.00182]
# weighted xent = [0.000675, 14.00364]
# Reduced xent = (0.000675 + 14.00364) / (1.5 + 2)
self.assertAllClose(self.evaluate(result), 4.0012, atol=1e-3)
def test_axis(self):
scce_obj = metrics.SparseCategoricalCrossentropy(axis=0)
self.evaluate(tf.compat.v1.variables_initializer(scce_obj.variables))
y_true = np.asarray([1, 2])
y_pred = np.asarray([[0.05, 0.1], [0.95, 0.8], [0, 0.1]])
result = scce_obj(y_true, y_pred)
# EPSILON = 1e-7, y = y_true, y` = y_pred
# y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
# y` = [[0.05, 0.1], [0.95, 0.8], [EPSILON, 0.1]]
# logits = log(y`) = [[-2.9957, -2.3026],
# [-0.0513, -0.2231],
# [-16.1181, -2.3026]]
# softmax = exp(logits) / sum(exp(logits), axis=-1)
# y = one_hot(y) = [[0, 0], [1, 0], [0, 1]]
# xent = -sum(y * log(softmax), 1)
# exp(logits) = [[0.05, 0.1], [0.95, 0.8], [EPSILON, 0.1]]
# sum(exp(logits)) = [1, 1]
# softmax = [[0.05, 0.1], [0.95, 0.8], [EPSILON, 0.1]]
# log(softmax) = [[-2.9957, -2.3026],
# [-0.0513, -0.2231],
# [-16.1181, -2.3026]]
# y * log(softmax) = [[0, 0], [-0.0513, 0], [0, -2.3026]]
# xent = [0.0513, 2.3026]
# Reduced xent = (0.0513 + 2.3026) / 2
self.assertAllClose(self.evaluate(result), 1.176, atol=1e-3)
class BinaryTruePositives(metrics.Metric):
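  """A minimal custom streaming metric: accumulates (optionally weighted) true positives across batches."""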
def __init__(self, name='binary_true_positives', **kwargs):
super(BinaryTruePositives, self).__init__(name=name, **kwargs)
self.true_positives = self.add_weight(name='tp', initializer='zeros')
def update_state(self, y_true, y_pred, sample_weight=None):
y_true = tf.cast(y_true, tf.bool)
y_pred = tf.cast(y_pred, tf.bool)
values = tf.logical_and(
tf.equal(y_true, True), tf.equal(y_pred, True))
values = tf.cast(values, self.dtype)
if sample_weight is not None:
sample_weight = tf.cast(sample_weight, dtype=self.dtype)
sample_weight = tf.__internal__.ops.broadcast_weights(
sample_weight, values)
values = tf.multiply(values, sample_weight)
self.true_positives.assign_add(tf.reduce_sum(values))
def result(self):
return self.true_positives
class BinaryTruePositivesViaControlFlow(metrics.Metric):
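  """Same true-positive count, but written with Python control flow to exercise AutoGraph."""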
def __init__(self, name='binary_true_positives', **kwargs):
super(BinaryTruePositivesViaControlFlow, self).__init__(name=name, **kwargs)
self.true_positives = self.add_weight(name='tp', initializer='zeros')
def update_state(self, y_true, y_pred, sample_weight=None):
y_true = tf.cast(y_true, tf.bool)
y_pred = tf.cast(y_pred, tf.bool)
for i in range(len(y_true)):
for j in range(len(y_true[i])):
if y_true[i][j] and y_pred[i][j]:
if sample_weight is None:
self.true_positives.assign_add(1)
else:
self.true_positives.assign_add(sample_weight[i][0])
def result(self):
if tf.constant(True):
return self.true_positives
return 0.0
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class CustomMetricsTest(tf.test.TestCase):
def test_config(self):
btp_obj = BinaryTruePositives(name='btp', dtype=tf.int32)
self.assertEqual(btp_obj.name, 'btp')
self.assertEqual(btp_obj.dtype, tf.int32)
# Check save and restore config
btp_obj2 = BinaryTruePositives.from_config(btp_obj.get_config())
self.assertEqual(btp_obj2.name, 'btp')
self.assertEqual(btp_obj2.dtype, tf.int32)
def test_unweighted(self):
btp_obj = BinaryTruePositives()
self.evaluate(tf.compat.v1.variables_initializer(btp_obj.variables))
y_true = tf.constant([[0, 0.9, 0, 1, 0], [0, 0, 1, 1, 1],
[1, 1, 1, 1, 0], [0, 0, 0, 0, 1.5]])
y_pred = tf.constant([[0, 0, 1, 5, 0], [1, 1, 1, 1, 1],
[0, 1, 0, 1, 0], [1, 10, 1, 1, 1]])
update_op = btp_obj.update_state(y_true, y_pred) # pylint: disable=assignment-from-no-return
self.evaluate(update_op)
result = btp_obj.result()
self.assertEqual(7, self.evaluate(result))
def test_weighted(self):
btp_obj = BinaryTruePositives()
self.evaluate(tf.compat.v1.variables_initializer(btp_obj.variables))
y_true = tf.constant([[0, 0.9, 0, 1, 0], [0, 0, 1, 1, 1],
[1, 1, 1, 1, 0], [0, 0, 0, 0, 1.5]])
y_pred = tf.constant([[0, 0, 1, 5, 0], [1, 1, 1, 1, 1],
[0, 1, 0, 1, 0], [1, 10, 1, 1, 1]])
sample_weight = tf.constant([[1.], [1.5], [2.], [2.5]])
result = btp_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertEqual(12, self.evaluate(result))
def test_autograph(self):
metric = BinaryTruePositivesViaControlFlow()
self.evaluate(tf.compat.v1.variables_initializer(metric.variables))
y_true = tf.constant([[0, 0.9, 0, 1, 0], [0, 0, 1, 1, 1],
[1, 1, 1, 1, 0], [0, 0, 0, 0, 1.5]])
y_pred = tf.constant([[0, 0, 1, 5, 0], [1, 1, 1, 1, 1],
[0, 1, 0, 1, 0], [1, 10, 1, 1, 1]])
sample_weight = tf.constant([[1.], [1.5], [2.], [2.5]])
@tf.function
def compute_metric(y_true, y_pred, sample_weight):
metric(y_true, y_pred, sample_weight)
return metric.result()
result = compute_metric(y_true, y_pred, sample_weight)
self.assertEqual(12, self.evaluate(result))
def test_metric_wrappers_autograph(self):
def metric_fn(y_true, y_pred):
x = tf.constant(0.0)
for i in range(len(y_true)):
for j in range(len(y_true[i])):
if tf.equal(y_true[i][j], y_pred[i][j]) and y_true[i][j] > 0:
x += 1.0
return x
mean_metric = metrics.MeanMetricWrapper(metric_fn)
sum_metric = metrics.SumOverBatchSizeMetricWrapper(metric_fn)
self.evaluate(tf.compat.v1.variables_initializer(mean_metric.variables))
self.evaluate(tf.compat.v1.variables_initializer(sum_metric.variables))
y_true = tf.constant([[0, 0, 0, 1, 0],
[0, 0, 1, 1, 1],
[1, 1, 1, 1, 0],
[1, 1, 1, 0, 1]])
y_pred = tf.constant([[0, 0, 1, 1, 0],
[1, 1, 1, 1, 1],
[0, 1, 0, 1, 0],
[1, 1, 1, 1, 1]])
@tf.function
def tf_functioned_metric_fn(metric, y_true, y_pred):
return metric(y_true, y_pred)
metric_result = tf_functioned_metric_fn(mean_metric, y_true, y_pred)
self.assertAllClose(self.evaluate(metric_result), 10, 1e-2)
metric_result = tf_functioned_metric_fn(sum_metric, y_true, y_pred)
self.assertAllClose(self.evaluate(metric_result), 10, 1e-2)
def test_metric_not_tracked_as_sublayer_in_layer(self):
class MyLayer(base_layer.Layer):
def __init__(self, **kwargs):
super(MyLayer, self).__init__(**kwargs)
self.mean_obj = metrics.Mean(name='my_mean_obj')
def call(self, x):
self.add_metric(
tf.reduce_sum(x), aggregation='mean', name='my_mean_tensor')
self.add_metric(self.mean_obj(x))
return x
layer = MyLayer()
x = np.ones((1, 1))
layer(x)
self.assertLen(list(layer._flatten_layers(include_self=False)), 0)
self.assertLen(layer.metrics, 2)
def test_metric_not_tracked_as_sublayer_in_model(self):
class MyModel(training_module.Model):
def __init__(self, **kwargs):
super(MyModel, self).__init__(**kwargs)
self.mean_obj = metrics.Mean(name='my_mean_obj')
def call(self, x):
self.add_metric(
tf.reduce_sum(x), aggregation='mean', name='my_mean_tensor')
self.add_metric(self.mean_obj(x))
return x
model = MyModel()
x = np.ones((1, 1))
model(x)
self.assertLen(list(model._flatten_layers(include_self=False)), 0)
self.assertLen(model.layers, 0)
self.assertLen(model.metrics, 2)
def test_invalid_custom_metric_class_error_msg(self):
x = layers.Input(shape=(2,))
y = layers.Dense(3)(x)
model = training_module.Model(x, y)
class BadMetric(metrics.Metric):
def update_state(self, y_true, y_pred, sample_weight=None):
return
def result(self):
return
with self.assertRaisesRegex(RuntimeError,
'can only be a single'):
model.compile('sgd',
'mse',
metrics=[BadMetric()])
model.fit(np.ones((10, 2)), np.ones((10, 3)))
def test_invalid_custom_metric_fn_error_msg(self):
x = layers.Input(shape=(2,))
y = layers.Dense(3)(x)
model = training_module.Model(x, y)
def bad_metric(y_true, y_pred, sample_weight=None): # pylint: disable=unused-argument
return None
def dict_metric(y_true, y_pred, sample_weight=None): # pylint: disable=unused-argument
return {'value': 0.}
with self.assertRaisesRegex(RuntimeError,
'The output of a metric function can only be'):
model.compile('sgd',
'mse',
metrics=[bad_metric])
model.fit(np.ones((10, 2)), np.ones((10, 3)))
with self.assertRaisesRegex(RuntimeError,
'To return a dict of values, implement'):
model.compile('sgd',
'mse',
metrics=[dict_metric])
model.fit(np.ones((10, 2)), np.ones((10, 3)))
def _get_model(compile_metrics):
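  """Build and compile a small Dense(3) -> Dense(1) sigmoid model with the given metrics attached."""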
model_layers = [
layers.Dense(3, activation='relu', kernel_initializer='ones'),
layers.Dense(1, activation='sigmoid', kernel_initializer='ones')]
model = testing_utils.get_model_from_layers(model_layers, input_shape=(4,))
model.compile(
loss='mae',
metrics=compile_metrics,
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
return model
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class ResetStatesTest(keras_parameterized.TestCase):
def test_reset_state_false_positives(self):
fp_obj = metrics.FalsePositives()
model = _get_model([fp_obj])
x = np.ones((100, 4))
y = np.zeros((100, 1))
model.evaluate(x, y)
self.assertEqual(self.evaluate(fp_obj.accumulator), 100.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(fp_obj.accumulator), 100.)
def test_reset_state_false_negatives(self):
fn_obj = metrics.FalseNegatives()
model = _get_model([fn_obj])
x = np.zeros((100, 4))
y = np.ones((100, 1))
model.evaluate(x, y)
self.assertEqual(self.evaluate(fn_obj.accumulator), 100.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(fn_obj.accumulator), 100.)
def test_reset_state_true_negatives(self):
tn_obj = metrics.TrueNegatives()
model = _get_model([tn_obj])
x = np.zeros((100, 4))
y = np.zeros((100, 1))
model.evaluate(x, y)
self.assertEqual(self.evaluate(tn_obj.accumulator), 100.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(tn_obj.accumulator), 100.)
def test_reset_state_true_positives(self):
tp_obj = metrics.TruePositives()
model = _get_model([tp_obj])
x = np.ones((100, 4))
y = np.ones((100, 1))
model.evaluate(x, y)
self.assertEqual(self.evaluate(tp_obj.accumulator), 100.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(tp_obj.accumulator), 100.)
def test_reset_state_precision(self):
p_obj = metrics.Precision()
model = _get_model([p_obj])
x = np.concatenate((np.ones((50, 4)), np.ones((50, 4))))
y = np.concatenate((np.ones((50, 1)), np.zeros((50, 1))))
model.evaluate(x, y)
self.assertEqual(self.evaluate(p_obj.true_positives), 50.)
self.assertEqual(self.evaluate(p_obj.false_positives), 50.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(p_obj.true_positives), 50.)
self.assertEqual(self.evaluate(p_obj.false_positives), 50.)
def test_reset_state_recall(self):
r_obj = metrics.Recall()
model = _get_model([r_obj])
x = np.concatenate((np.ones((50, 4)), np.zeros((50, 4))))
y = np.concatenate((np.ones((50, 1)), np.ones((50, 1))))
model.evaluate(x, y)
self.assertEqual(self.evaluate(r_obj.true_positives), 50.)
self.assertEqual(self.evaluate(r_obj.false_negatives), 50.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(r_obj.true_positives), 50.)
self.assertEqual(self.evaluate(r_obj.false_negatives), 50.)
def test_reset_state_sensitivity_at_specificity(self):
s_obj = metrics.SensitivityAtSpecificity(0.5, num_thresholds=1)
model = _get_model([s_obj])
x = np.concatenate((np.ones((25, 4)), np.zeros((25, 4)), np.zeros((25, 4)),
np.ones((25, 4))))
y = np.concatenate((np.ones((25, 1)), np.zeros((25, 1)), np.ones((25, 1)),
np.zeros((25, 1))))
for _ in range(2):
model.evaluate(x, y)
self.assertEqual(self.evaluate(s_obj.true_positives), 25.)
self.assertEqual(self.evaluate(s_obj.false_positives), 25.)
self.assertEqual(self.evaluate(s_obj.false_negatives), 25.)
self.assertEqual(self.evaluate(s_obj.true_negatives), 25.)
def test_reset_state_specificity_at_sensitivity(self):
s_obj = metrics.SpecificityAtSensitivity(0.5, num_thresholds=1)
model = _get_model([s_obj])
x = np.concatenate((np.ones((25, 4)), np.zeros((25, 4)), np.zeros((25, 4)),
np.ones((25, 4))))
y = np.concatenate((np.ones((25, 1)), np.zeros((25, 1)), np.ones((25, 1)),
np.zeros((25, 1))))
for _ in range(2):
model.evaluate(x, y)
self.assertEqual(self.evaluate(s_obj.true_positives), 25.)
self.assertEqual(self.evaluate(s_obj.false_positives), 25.)
self.assertEqual(self.evaluate(s_obj.false_negatives), 25.)
self.assertEqual(self.evaluate(s_obj.true_negatives), 25.)
def test_reset_state_precision_at_recall(self):
s_obj = metrics.PrecisionAtRecall(recall=0.5, num_thresholds=1)
model = _get_model([s_obj])
x = np.concatenate((np.ones((25, 4)), np.zeros((25, 4)), np.zeros((25, 4)),
np.ones((25, 4))))
y = np.concatenate((np.ones((25, 1)), np.zeros((25, 1)), np.ones((25, 1)),
np.zeros((25, 1))))
for _ in range(2):
model.evaluate(x, y)
self.assertEqual(self.evaluate(s_obj.true_positives), 25.)
self.assertEqual(self.evaluate(s_obj.false_positives), 25.)
self.assertEqual(self.evaluate(s_obj.false_negatives), 25.)
self.assertEqual(self.evaluate(s_obj.true_negatives), 25.)
def test_reset_state_recall_at_precision(self):
s_obj = metrics.RecallAtPrecision(precision=0.5, num_thresholds=1)
model = _get_model([s_obj])
x = np.concatenate(( | np.ones((25, 4)) | numpy.ones |
"""Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License."""
"""Evaluates the network."""
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import plus_input_data
import numpy as np
from plus_model import tf_model
from collections import defaultdict
from scipy.spatial import distance
def fill_eval_feed_dict(data_set, placeholder, FLAGS, rel):
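    """Build the feed dict for one evaluation batch: padded term-index sequences,
    their masks and lengths, relation indices, labels, and the relation mask."""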
r_idx, t1_idx, t2_idx, labels = data_set.eval_batch()
t1x, t1mask, t1length= plus_input_data.prepare_data(t1_idx)
t2x, t2mask, t2length = plus_input_data.prepare_data(t2_idx)
relmsk = plus_input_data.rel_msk(r_idx, rel)
# define identity matrix
# t = np.zeros((r_idx.shape[0],FLAGS.embed_dim,FLAGS.embed_dim))
# t = np.asarray([1.0 if i == j else 0.0 for k in range(t.shape[0]) for i in range(t.shape[1]) for j in range(t.shape[2])], np.float32)
# t = t.reshape(r_idx.shape[0],FLAGS.embed_dim,FLAGS.embed_dim)
feed_dict = {
placeholder['t1_idx_placeholder']: t1x,
placeholder['t1_msk_placeholder']: t1mask,
placeholder['t1_length_placeholder']: t1length,
placeholder['t2_idx_placeholder']: t2x,
placeholder['t2_msk_placeholder']: t2mask,
placeholder['t2_length_placeholder']: t2length,
placeholder['rel_placeholder']: r_idx,
placeholder['label_placeholder']: labels,
placeholder['rel_msk_placeholder']: relmsk,
}
return feed_dict
def fill_feed_dict(data_set, placeholder, FLAGS, rel):
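    """Build the training-style feed dict for one batch; the negative-sampling
    inputs are currently disabled (see the commented-out placeholders below)."""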
r_idx, t1_idx, t2_idx, labels = data_set.eval_batch()
t1x, t1mask, t1length= plus_input_data.prepare_data(t1_idx)
t2x, t2mask, t2length = plus_input_data.prepare_data(t2_idx)
# print('r_idx', r_idx.shape)
relmsk = plus_input_data.rel_msk(r_idx, rel)
    #randomly find negative examples from the same batch
# nr_idx, nt1_idx, nt2_idx, nlabels = plus_input_data.find_neg(r_idx, t1_idx, t2_idx, labels)
# nt1x, nt1mask, nt1length= plus_input_data.prepare_data(nt1_idx)
# nt2x, nt2mask, nt2length = plus_input_data.prepare_data(nt2_idx)
# nrelmsk = plus_input_data.rel_msk(nr_idx, rel)
# define identity matrix
#t = np.zeros((r_idx.shape[0],FLAGS.embed_dim,FLAGS.embed_dim))
#t = np.asarray([1.0 if i == j else 0.0 for k in range(t.shape[0]) for i in range(t.shape[1]) for j in range(t.shape[2])], np.float32)
#t = t.reshape(r_idx.shape[0],FLAGS.embed_dim,FLAGS.embed_dim)
# iden = tf.Variable(t)
feed_dict = {
placeholder['t1_idx_placeholder']: t1x,
placeholder['t1_msk_placeholder']: t1mask,
placeholder['t1_length_placeholder']: t1length,
placeholder['t2_idx_placeholder']: t2x,
placeholder['t2_msk_placeholder']: t2mask,
placeholder['t2_length_placeholder']: t2length,
# placeholder['nt1_idx_placeholder']: nt1x,
# placeholder['nt1_msk_placeholder']: nt1mask,
# placeholder['nt1_length_placeholder']: nt1length,
# placeholder['nt2_idx_placeholder']: nt2x,
# placeholder['nt2_msk_placeholder']: nt2mask,
# placeholder['nt2_length_placeholder']: nt2length,
placeholder['rel_placeholder']: r_idx,
# placeholder['nrel_placeholder']: nr_idx,
placeholder['label_placeholder']: labels,
# placeholder['nlabel_placeholder']: nlabels,
placeholder['rel_msk_placeholder']: relmsk,
# placeholder['nrel_msk_placeholder']: nrelmsk,
}
return feed_dict
def best_threshold(errs, target, outfile):
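    """Treat each sorted error value as a candidate decision threshold, pick the one
    that maximizes accuracy against `target`, print the dev accuracy to `outfile`
    (and precision/recall/F1 to stdout), and return (best_threshold, best_accuracy)."""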
indices = np.argsort(errs)
sortedErrors = errs[indices]
sortedTarget = target[indices]
tp = np.cumsum(sortedTarget)
invSortedTarget = (sortedTarget == 0).astype('float32')
Nneg = invSortedTarget.sum()
fp = np.cumsum(invSortedTarget)
tn = fp * -1 + Nneg
accuracies = (tp + tn) / sortedTarget.shape[0]
i = accuracies.argmax()
# print('errors', sortedErrors[:])
# print('target', invSortedTarget[:])
print("Accuracy for Dev:", accuracies[i], file = outfile)
# calculate recall precision and F1
Npos = sortedTarget.sum()
fn = tp * -1 + Npos
# print('tp',tp)
# print('fp',fp)
precision = tp/(tp + fp)
recall = tp/(tp + fn)
# print(precision[i])
# print(recall[i])
# print(tp[i])
# print(fp[i])
# print(tp[i]+tn[i])
f1 = (2*precision[i]*recall[i])/(precision[i]+recall[i])
# print("Precision, Recall and F1 are %.5f %.5f %.5f" % (precision[i], recall[i], f1), file = outfile)
print("Precision, Recall and F1 are %.5f %.5f %.5f" % (precision[i], recall[i], f1))
# print("Number of positives, negatives, tp, tn: %f %f %f %f" % (target.sum(), Nneg, tp[i], tn[i]))
return sortedErrors[i], accuracies[i]
def wordnet_train_eval(sess,h_error, placeholder,data_set, num, FLAGS, rel, outfile):
    """Find the accuracy-maximizing threshold on one evaluation batch and return that accuracy."""
    feed_dict = fill_eval_feed_dict(data_set, placeholder, FLAGS, rel)
    true_label = feed_dict[placeholder['label_placeholder']]
    he_error = sess.run(h_error, feed_dict = feed_dict)
    # best_threshold reports its accuracy to an output stream, so one is taken here as well
    _, acc = best_threshold(he_error, true_label, outfile)
    return acc
def do_eval(sess,h_error,placeholder,data_set, devtest,test, num, curr_best, FLAGS,error_file_name,outfile, rel, words):
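    """Choose the best threshold on the dev batch, then score the devtest and test
    sets with it; when devtest accuracy beats `curr_best`, optionally dump
    per-relation accuracy and error analysis. Returns (devtest_acc, test_acc,
    wrong_indices, wrong_preds)."""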
feed_dict = fill_eval_feed_dict(data_set, placeholder, FLAGS, rel)
true_label = feed_dict[placeholder['label_placeholder']]
he_error = sess.run(h_error, feed_dict = feed_dict)
thresh, _ = best_threshold(he_error, true_label, outfile)
    #evaluate devtest
feed_dict_devtest = fill_eval_feed_dict(devtest, placeholder, FLAGS, rel)
true_label_devtest = feed_dict_devtest[placeholder['label_placeholder']]
devtest_he_error = sess.run(h_error, feed_dict = feed_dict_devtest)
pred = devtest_he_error <= thresh
correct = (pred == true_label_devtest)
accuracy = float(correct.astype('float32').mean())
wrong_indices = np.logical_not(correct).nonzero()[0]
wrong_preds = pred[wrong_indices]
#evaluate test
feed_dict_test = fill_eval_feed_dict(test, placeholder, FLAGS, rel)
true_label_test = feed_dict_test[placeholder['label_placeholder']]
test_he_error = sess.run(h_error, feed_dict = feed_dict_test)
test_pred = test_he_error <= thresh
test_correct = (test_pred == true_label_test)
test_accuracy = float(test_correct.astype('float32').mean())
test_wrong_indices = np.logical_not(test_correct).nonzero()[0]
test_wrong_preds = test_pred[test_wrong_indices]
if accuracy>curr_best:
        # #evaluate devtest
error_file = open(error_file_name+"_test.txt",'wt')
if FLAGS.rel_acc:
rel_acc_checker(feed_dict_devtest, placeholder, correct, data_set, error_file, rel)
if FLAGS.error_analysis:
err_analysis(data_set, wrong_indices, feed_dict_devtest, placeholder, error_file, rel, words)
return accuracy,test_accuracy, wrong_indices, wrong_preds
def do_train_eval(sess,h_error,nh_error, placeholder,data_set, num, neg_data, curr_best, FLAGS, error_file_name, outfile, rel, words):
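    """Evaluate on a training batch: score the positive pairs (h_error) and the
    corresponding negatives (nh_error), find the accuracy-maximizing threshold over
    both, and when the accuracy improves on `curr_best` record the misclassified
    positive examples."""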
feed_dict = fill_feed_dict(data_set, placeholder, FLAGS, rel)
# concatenate the true and false labels
true_label = feed_dict[placeholder['label_placeholder']]
false_label = np.zeros(true_label.shape)
labels = np.concatenate((true_label, false_label), axis = 0)
# print("type of true labels",type(true_label))
he_error = sess.run(h_error, feed_dict = feed_dict)
nhe_error = sess.run(nh_error, feed_dict = feed_dict)
errors = np.concatenate((he_error, nhe_error), axis = 0)
# print("type of errors",type(he_error))
thresh, acc = best_threshold(errors, labels, outfile)
if acc > curr_best:
error_file = open(error_file_name+"_train.txt",'wt')
pred = he_error <= thresh
correct = (pred == true_label)
accuracy = float(correct.astype('float32').mean())
wrong_indices = | np.logical_not(correct) | numpy.logical_not |
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from builtins import str
from builtins import zip
from builtins import range
from sys import stdout
import multiprocessing as mp
import numpy as np
from vsm.split import split_documents
from vsm.model.ldafunctions import load_lda
from vsm.model.ldacgsseq import *
from vsm.model._cgs_update import cgs_update
import cython
import platform # For Windows comaptability
import itertools
from progressbar import ProgressBar, Percentage, Bar
__all__ = [ 'LdaCgsMulti' ]
class LdaCgsMulti(LdaCgsSeq):
"""
An implementation of LDA using collapsed Gibbs sampling with multi-processing.
On Windows platforms, LdaCgsMulti is not supported. A NotImplementedError
will be raised notifying the user to use the LdaCgsSeq package. Users
desiring a platform-independent fallback should use LDA(multiprocess=True) to
initialize the object, which will return either a LdaCgsMulti or a LdaCgsSeq
instance, depending on the platform, while raising a RuntimeWarning.
"""
def __init__(self, corpus=None, context_type=None, K=20, V=0,
alpha=[], beta=[], n_proc=2, seeds=None):
"""
Initialize LdaCgsMulti.
:param corpus: Source of observed data.
:type corpus: `Corpus`
:param context_type: Name of tokenization stored in `corpus` whose tokens
will be treated as documents.
:type context_type: string, optional
:param K: Number of topics. Default is `20`.
:type K: int, optional
:param alpha: Context priors. Default is a flat prior of 0.01
for all contexts.
:type alpha: list, optional
:param beta: Topic priors. Default is 0.01 for all topics.
:type beta: list, optional
:param n_proc: Number of processors used for training. Default is
2.
:type n_proc: int, optional
:param seeds: List of random seeds, one for each thread.
The length of the list should be same as `n_proc`. Default is `None`.
:type seeds: list of integers, optional
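
Example (a minimal sketch; assumes `corpus` is an existing `Corpus`
instance that carries a 'document' tokenization)::

    model = LdaCgsMulti(corpus=corpus, context_type='document',
                        K=40, n_proc=4, seeds=[1, 2, 3, 4])
    model.train(n_iterations=200)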
"""
if platform.system() == 'Windows':
raise NotImplementedError("""LdaCgsMulti is not implemented on
Windows. Please use LdaCgsSeq.""")
self._read_globals = False
self._write_globals = False
self.n_proc = n_proc
# set random seeds if unspecified
if seeds is None:
maxint = np.iinfo(np.int32).max
seeds = [np.random.randint(0, maxint) for n in range(n_proc)]
# check number of seeds == n_proc
if len(seeds) != n_proc:
raise ValueError("Number of seeds must equal number of processors " +
str(n_proc))
# initialize random states
self.seeds = seeds
self._mtrand_states = [np.random.RandomState(seed).get_state() for seed in self.seeds]
super(LdaCgsMulti, self).__init__(corpus=corpus, context_type=context_type,
K=K, V=V, alpha=alpha, beta=beta)
if corpus is not None:
self.dtype = corpus.corpus.dtype
# delete LdaCgsSeq seed and state
del self.seed
del self._mtrand_state
def _move_globals_to_locals(self):
self._write_globals = False
self.K = self.K
self.V = self.V
self.corpus = self.corpus
self.Z = self.Z
self.word_top = self.word_top
self.inv_top_sums = self.inv_top_sums
self.top_doc = self.top_doc
self.iteration = self.iteration
self._read_globals = False
global _K, _V, _corpus, _Z, _word_top, _inv_top_sums
global _top_doc, _iteration
del (_K, _V, _corpus, _Z, _word_top, _inv_top_sums,
_top_doc, _iteration)
def _move_locals_to_globals(self):
self._write_globals = True
self.K = self.K
self.V = self.V
self.corpus = self.corpus
self.Z = self.Z
self.word_top = self.word_top
self.inv_top_sums = self.inv_top_sums
self.top_doc = self.top_doc
self.iteration = self.iteration
self._read_globals = True
del (self._K_local, self._V_local, self._corpus_local, self._Z_local,
self._word_top_local, self._inv_top_sums_local,
self._top_doc_local, self._iteration_local)
@property
def word_top(self):
if self._read_globals:
return np.frombuffer(_word_top, np.float32).reshape(self.V, self.K)
return self._word_top_local
@word_top.setter
def word_top(self, a):
if self._write_globals:
global _word_top
if not '_word_top' in globals():
_word_top = mp.Array('f', self.V * self.K, lock=False)
_word_top[:] = a.reshape(-1,)
else:
self._word_top_local = a
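# Illustrative sketch (not part of the class) of the shared-memory pattern
# used by the properties above: a lock-free multiprocessing.Array holds the
# raw buffer and numpy views it without copying, so worker processes forked
# later see the same float32 data.
#
#   import multiprocessing as mp
#   import numpy as np
#   buf = mp.Array('f', 6, lock=False)
#   view = np.frombuffer(buf, np.float32).reshape(2, 3)
#   view[:] = 1.0  # visible to children forked after this point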
@property
def inv_top_sums(self):
if self._read_globals:
return np.frombuffer(_inv_top_sums, np.float32)
return self._inv_top_sums_local
@inv_top_sums.setter
def inv_top_sums(self, a):
if self._write_globals:
global _inv_top_sums
if not '_inv_top_sums' in globals():
_inv_top_sums = mp.Array('f', self.K, lock=False)
_inv_top_sums[:] = a
else:
self._inv_top_sums_local = a
@property
def top_doc(self):
if self._read_globals:
top_doc = np.frombuffer(_top_doc, np.float32)
return top_doc.reshape(self.K, len(self.indices))
return self._top_doc_local
@top_doc.setter
def top_doc(self, a):
if self._write_globals:
global _top_doc
if not '_top_doc' in globals():
_top_doc = mp.Array('f', self.K * len(self.indices), lock=False)
_top_doc[:] = a.reshape(-1,)
else:
self._top_doc_local = a
@property
def corpus(self):
if self._read_globals:
return np.frombuffer(_corpus, self.dtype)
return self._corpus_local
@corpus.setter
def corpus(self, a):
if self._write_globals:
global _corpus
if not '_corpus' in globals():
if self.corpus.dtype == 'uint16':
dtype = 'H'
elif self.corpus.dtype == 'uint32':
dtype = 'I'
else:
raise NotImplementedError
_corpus = mp.Array(dtype, len(a), lock=False)
_corpus[:] = a
else:
self._corpus_local = a
@property
def Z(self):
if self._read_globals:
if self.K < 2 ** 8:
Ktype = np.uint8
elif self.K < 2 ** 16:
Ktype = np.uint16
else:
raise NotImplementedError("Invalid Ktype. k={}".format(self.K))
return np.frombuffer(_Z, Ktype)
return self._Z_local
@Z.setter
def Z(self, a):
if self._write_globals:
global _Z
if not '_Z' in globals():
if self.K < 2 ** 8:
Ktype = 'B'
elif self.K < 2 ** 16:
Ktype = 'H'
else:
raise NotImplementedError
_Z = mp.Array(Ktype, len(a), lock=False)
_Z[:] = a
else:
self._Z_local = a
@property
def K(self):
if self._read_globals:
return _K.value
return self._K_local
@K.setter
def K(self, K):
if self._write_globals:
global _K
if not '_K' in globals():
_K = mp.Value('i')
_K.value = K
else:
self._K_local = K
@property
def V(self):
if self._read_globals:
return _V.value
return self._V_local
@V.setter
def V(self, V):
if self._write_globals:
global _V
if not '_V' in globals():
_V = mp.Value('i')
_V.value = V
else:
self._V_local = V
@property
def iteration(self):
if self._read_globals:
return _iteration.value
return self._iteration_local
@iteration.setter
def iteration(self, iteration):
if self._write_globals:
global _iteration
if not '_iteration' in globals():
_iteration = mp.Value('i')
_iteration.value = iteration
else:
self._iteration_local = iteration
def train(self, n_iterations=500, verbose=1):
"""
Takes an optional argument, `n_iterations` and updates the model
`n_iterations` times.
:param n_iterations: Number of iterations. Default is 500.
:type n_iterations: int, optional
:param verbose: If `True`, current number of iterations
are printed out to notify the user. Default is `True`.
:type verbose: boolean, optional
"""
if mp.cpu_count() < self.n_proc:
raise RuntimeError("Model seeded with more cores than available." +
" Requires {0} cores.".format(self.n_proc))
self._move_locals_to_globals()
docs = split_documents(self.corpus, self.indices, self.n_proc)
doc_indices = [(0, len(docs[0]))]
for i in range(len(docs)-1):
doc_indices.append((doc_indices[i][1],
doc_indices[i][1] + len(docs[i+1])))
p = mp.Pool(len(docs))
if verbose == 1:
pbar = ProgressBar(widgets=[Percentage(), Bar()], maxval=n_iterations).start()
n_iterations += self.iteration
iteration = 0
while self.iteration < n_iterations:
if verbose == 2:
stdout.write('\rIteration %d: mapping ' % self.iteration)
stdout.flush()
data = list(zip(docs, doc_indices, self._mtrand_states,
itertools.repeat(self.dtype)))
# For debugging
# results = map(update, data)
if platform.system() == 'Windows':
raise NotImplementedError("""LdaCgsMulti is not implemented on Windows.
Please use LdaCgsSeq.""")
else:
results = p.map(update, data)
if verbose == 2:
stdout.write('\rIteration %d: reducing ' % self.iteration)
stdout.flush()
if verbose == 1:
#print("Self iteration", self.iteration)
pbar.update(iteration)
(Z_ls, top_doc_ls, word_top_ls, logp_ls, mtrand_str_ls,
mtrand_keys_ls, mtrand_pos_ls, mtrand_has_gauss_ls,
mtrand_cached_gaussian_ls) = list(zip(*results))
self._mtrand_states = list(zip(mtrand_str_ls, mtrand_keys_ls, mtrand_pos_ls,
mtrand_has_gauss_ls, mtrand_cached_gaussian_ls))
for t in range(len(results)):
start, stop = docs[t][0][0], docs[t][-1][1]
self.Z[start:stop] = Z_ls[t]
self.top_doc[:, doc_indices[t][0]:doc_indices[t][1]] = top_doc_ls[t]
self.word_top = self.word_top + np.sum(word_top_ls, axis=0)
self.inv_top_sums = 1. / self.word_top.sum(0)
lp = np.sum(logp_ls, dtype=np.float32)
self.log_probs.append((self.iteration, lp))
if verbose == 2:
stdout.write('\rIteration %d: log_prob=' % self.iteration)
stdout.flush()
print('%f' % lp)
iteration += 1
self.iteration += 1
if verbose == 1:
pbar.finish()
p.close()
self._move_globals_to_locals()
@staticmethod
def load(filename):
"""
A static method for loading a saved LdaCgsMulti model.
:param filename: Name of a saved model to be loaded.
:type filename: string
:returns: m : LdaCgsMulti object
:See Also: :class:`numpy.load`
"""
if platform.system() == 'Windows':
raise NotImplementedError("""LdaCgsMulti is not implemented on
Windows. Please use LdaCgsSeq.""")
return load_lda(filename, LdaCgsMulti)
def update(args):
"""
For LdaCgsMulti
"""
(docs, doc_indices, mtrand_state, dtype) = args
start, stop = docs[0][0], docs[-1][1]
global Ktype
if _K.value < 2 ** 8:
Ktype = np.uint8
elif _K.value < 2 ** 16:
Ktype = np.uint16
else:
raise NotImplementedError("Invalid Ktype. k={}".format(_K))
corpus = | np.frombuffer(_corpus, dtype=dtype) | numpy.frombuffer |
"""
The microstructure module provide elementary classes to describe a
crystallographic granular microstructure such as mostly present in
metallic materials.
It contains several classes which are used to describe a microstructure
composed of several grains, each one having its own crystallographic
orientation:
* :py:class:`~pymicro.crystal.microstructure.Microstructure`
* :py:class:`~pymicro.crystal.microstructure.Grain`
* :py:class:`~pymicro.crystal.microstructure.Orientation`
"""
import numpy as np
import os
import vtk
import h5py
import math
from pathlib import Path
from scipy import ndimage
from matplotlib import pyplot as plt, colors
from pymicro.crystal.lattice import Lattice, Symmetry, CrystallinePhase, Crystal
from pymicro.crystal.quaternion import Quaternion
from pymicro.core.samples import SampleData
import tables
from math import atan2, pi
class Orientation:
"""Crystallographic orientation class.
This follows the passive rotation definition which means that it brings
the sample coordinate system into coincidence with the crystal coordinate
system. Then one may express a vector :math:`V_c` in the crystal coordinate
system from the vector in the sample coordinate system :math:`V_s` by:
.. math::
V_c = g.V_s
and inversely (because :math:`g^{-1}=g^T`):
.. math::
V_s = g^T.V_c
Most of the code to handle rotations has been written to comply with the
conventions laid in :cite:`Rowenhorst2015`.
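
Example (a minimal sketch)::

    import numpy as np
    o = Orientation.from_euler((45., 0., 0.))
    Vs = np.array([1., 0., 0.])
    Vc = o.to_crystal(Vs)  # the same vector, expressed in the crystal frame
    assert np.allclose(o.to_sample(Vc), Vs)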
"""
def __init__(self, matrix):
"""Initialization from the 9 components of the orientation matrix."""
g = np.array(matrix, dtype=np.float64).reshape((3, 3))
self._matrix = g
self.euler = Orientation.OrientationMatrix2Euler(g)
self.rod = Orientation.OrientationMatrix2Rodrigues(g)
self.quat = Orientation.OrientationMatrix2Quaternion(g, P=1)
def orientation_matrix(self):
"""Returns the orientation matrix in the form of a 3x3 numpy array."""
return self._matrix
def __repr__(self):
"""Provide a string representation of the class."""
s = 'Crystal Orientation \n-------------------'
s += '\norientation matrix = \n %s' % self._matrix.view()
s += '\nEuler angles (degrees) = (%8.3f,%8.3f,%8.3f)' % (self.phi1(), self.Phi(), self.phi2())
s += '\nRodrigues vector = %s' % self.OrientationMatrix2Rodrigues(self._matrix)
s += '\nQuaternion = %s' % self.OrientationMatrix2Quaternion(self._matrix, P=1)
return s
def to_crystal(self, v):
"""Transform a vector or a matrix from the sample frame to the crystal
frame.
:param ndarray v: a 3 component vector or a 3x3 array expressed in
the sample frame.
:return: the vector or matrix expressed in the crystal frame.
"""
if v.size not in [3, 9]:
raise ValueError('input arg must be a 3 components vector '
'or a 3x3 matrix, got %d values' % v.size)
g = self.orientation_matrix()
if v.size == 3:
# input is vector
return np.dot(g, v)
else:
# input is 3x3 matrix
return np.dot(g, np.dot(v, g.T))
def to_sample(self, v):
"""Transform a vector or a matrix from the crystal frame to the sample
frame.
:param ndarray v: a 3 component vector or a 3x3 array expressed in
the crystal frame.
:return: the vector or matrix expressed in the sample frame.
"""
if v.size not in [3, 9]:
raise ValueError('input arg must be a 3 components vector '
'or a 3x3 matrix, got %d values' % v.size)
g = self.orientation_matrix()
if v.size == 3:
# input is vector
return np.dot(g.T, v)
else:
# input is 3x3 matrix
return np.dot(g.T, np.dot(v, g))
@staticmethod
def cube():
"""Create the particular crystal orientation called Cube and which
corresponds to euler angle (0, 0, 0)."""
return Orientation.from_euler((0., 0., 0.))
@staticmethod
def brass():
"""Create the particular crystal orientation called Brass and which
corresponds to euler angle (35.264, 45, 0)."""
return Orientation.from_euler((35.264, 45., 0.))
@staticmethod
def copper():
"""Create the particular crystal orientation called Copper and which
corresponds to euler angle (90, 35.264, 45)."""
return Orientation.from_euler((90., 35.264, 45.))
@staticmethod
def s3():
"""Create the particular crystal orientation called S3 and which
corresponds to euler angle (59, 37, 63)."""
return Orientation.from_euler((58.980, 36.699, 63.435))
@staticmethod
def goss():
"""Create the particular crystal orientation called Goss and which
corresponds to euler angle (0, 45, 0)."""
return Orientation.from_euler((0., 45., 0.))
@staticmethod
def shear():
"""Create the particular crystal orientation called shear and which
corresponds to euler angle (45, 0, 0)."""
return Orientation.from_euler((45., 0., 0.))
@staticmethod
def random():
"""Create a random crystal orientation."""
from random import random
from math import acos
phi1 = random() * 360.
Phi = 180. * acos(2 * random() - 1) / np.pi
phi2 = random() * 360.
return Orientation.from_euler([phi1, Phi, phi2])
def ipf_color(self, axis=np.array([0., 0., 1.]), symmetry=Symmetry.cubic, saturate=True):
"""Compute the IPF (inverse pole figure) colour for this orientation.
This method has been adapted from the DCT code.
.. note::
This method coexist with the `get_ipf_colour` for the moment.
:param ndarray axis: the direction to use to compute the IPF colour.
:param Symmetry symmetry: the symmetry operator to use.
:param bool saturate: a flag to saturate the RGB values.
:return: the IPF colour as an RGB array.
"""
axis /= np.linalg.norm(axis)
Vc = np.dot(self.orientation_matrix(), axis)
# get the symmetry operators
syms = symmetry.symmetry_operators()
syms = np.concatenate((syms, -syms))
Vc_syms = np.dot(syms, Vc)
# phi: rotation around 001 axis, from 100 axis to Vc vector, projected on (100,010) plane
Vc_phi = np.arctan2(Vc_syms[:, 1], Vc_syms[:, 0]) * 180 / pi
# chi: rotation around 010 axis, from 001 axis to Vc vector, projected on (100,001) plane
Vc_chi = np.arctan2(Vc_syms[:, 0], Vc_syms[:, 2]) * 180 / pi
# psi : angle from 001 axis to Vc vector
Vc_psi = np.arccos(Vc_syms[:, 2]) * 180 / pi
if symmetry is Symmetry.cubic:
angleR = 45 - Vc_chi # red color proportional to (45 - chi)
minAngleR = 0
maxAngleR = 45
angleB = Vc_phi # blue color proportional to phi
minAngleB = 0
maxAngleB = 45
elif symmetry is Symmetry.hexagonal:
angleR = 90 - Vc_psi # red color proportional to (90 - psi)
minAngleR = 0
maxAngleR = 90
angleB = Vc_phi # blue color proportional to phi
minAngleB = 0
maxAngleB = 30
else:
raise(ValueError('unsupported crystal symmetry to compute IPF color'))
# find the axis lying in the fundamental zone
fz_list = ((angleR >= minAngleR) & (angleR < maxAngleR) &
(angleB >= minAngleB) & (angleB < maxAngleB)).tolist()
if not fz_list.count(True) == 1:
raise(ValueError('problem moving to the fundamental zone'))
return None
i_SST = fz_list.index(True)
r = angleR[i_SST] / maxAngleR
g = (maxAngleR - angleR[i_SST]) / maxAngleR * (maxAngleB - angleB[i_SST]) / maxAngleB
b = (maxAngleR - angleR[i_SST]) / maxAngleR * angleB[i_SST] / maxAngleB
rgb = np.array([r, g, b])
if saturate:
rgb = rgb / rgb.max()
return rgb
def get_ipf_colour(self, axis=np.array([0., 0., 1.]), symmetry=Symmetry.cubic):
"""Compute the IPF (inverse pole figure) colour for this orientation.
Given a particular axis expressed in the laboratory coordinate system,
one can compute the so called IPF colour based on that direction
expressed in the crystal coordinate system as :math:`[x_c,y_c,z_c]`.
There is only one tuple (u,v,w) such that:
.. math::
[x_c,y_c,z_c]=u.[0,0,1]+v.[0,1,1]+w.[1,1,1]
and it is used to assign the RGB colour.
:param ndarray axis: the direction to use to compute the IPF colour.
:param Symmetry symmetry: the symmetry operator to use.
:return tuple: a tuple contining the RGB values.
"""
axis /= np.linalg.norm(axis)
# find the axis lying in the fundamental zone
for sym in symmetry.symmetry_operators():
Osym = np.dot(sym, self.orientation_matrix())
Vc = np.dot(Osym, axis)
if Vc[2] < 0:
Vc *= -1. # using the upward direction
uvw = np.array([Vc[2] - Vc[1], Vc[1] - Vc[0], Vc[0]])
uvw /= np.linalg.norm(uvw)
uvw /= max(uvw)
if (uvw[0] >= 0. and uvw[0] <= 1.0) and (uvw[1] >= 0. and uvw[1] <= 1.0) and (
uvw[2] >= 0. and uvw[2] <= 1.0):
# print('found sym for sst')
break
return uvw
@staticmethod
def compute_mean_orientation(rods, symmetry=Symmetry.cubic):
"""Compute the mean orientation.
This function computes a mean orientation from several data points
representing orientations. Each orientation is first moved to the
fundamental zone, then the corresponding Rodrigues vectors can be
averaged to compute the mean orientation.
:param ndarray rods: a (n, 3) shaped array containing the Rodrigues
vectors of the orientations.
:param `Symmetry` symmetry: the symmetry used to move orientations
to their fundamental zone (cubic by default)
:returns: the mean orientation as an `Orientation` instance.
"""
rods_fz = np.empty_like(rods)
for i in range(len(rods)):
g = Orientation.from_rodrigues(rods[i]).orientation_matrix()
g_fz = symmetry.move_rotation_to_FZ(g, verbose=False)
o_fz = Orientation(g_fz)
rods_fz[i] = o_fz.rod
mean_orientation = Orientation.from_rodrigues(np.mean(rods_fz, axis=0))
return mean_orientation
@staticmethod
def fzDihedral(rod, n):
"""check if the given Rodrigues vector is in the fundamental zone.
Following the book by Morawiec :cite:`Morawiec_2004`:
.. pull-quote::
The asymmetric domain is a prism with 2n-sided polygons (at the
distance $h_n$ from 0) as prism bases, and $2n$ square prism faces at
the distance $h_2 = 1$. The bases are perpendicular to the n-fold axis
and the faces are perpendicular to the twofold axes.
"""
# top and bottom face at +/-tan(pi/2n)
t = np.tan(np.pi / (2 * n))
if abs(rod[2]) > t:
return False
# 2n faces distance 1 from origin
# y <= ((2+sqrt(2))*t - (1+sqrt(2))) * x + (1+sqrt(2))*(1-t)
y, x = sorted([abs(rod[0]), abs(rod[1])])
if x > 1:
return False
return {
2: True,
3: y / (1 + math.sqrt(2)) + (1 - math.sqrt(2 / 3)) * x < 1 - 1 / math.sqrt(3),
4: y + x < math.sqrt(2),
6: y / (1 + math.sqrt(2)) + (1 - 2 * math.sqrt(2) + math.sqrt(6)) * x < math.sqrt(3) - 1
}[n]
def inFZ(self, symmetry=Symmetry.cubic):
"""Check if the given Orientation lies within the fundamental zone.
For a given crystal symmetry, several rotations can describe the same
physical crystallographic arrangement. The Rodrigues fundamental zone
(also called the asymmetric domain) restricts the orientation space
accordingly.
:param symmetry: the `Symmetry` to use.
:return bool: True if this orientation is in the fundamental zone,
False otherwise.
"""
r = self.rod
if symmetry == Symmetry.cubic:
inFZT23 = np.abs(r).sum() <= 1.0
# in the cubic symmetry, each component must be < 2 ** 0.5 - 1
inFZ = inFZT23 and np.abs(r).max() <= 2 ** 0.5 - 1
elif symmetry == Symmetry.hexagonal:
inFZ = Orientation.fzDihedral(r, 6)
else:
raise (ValueError('unsupported crystal symmetry: %s' % symmetry))
return inFZ
def move_to_FZ(self, symmetry=Symmetry.cubic, verbose=False):
"""
Compute the equivalent crystal orientation in the Fundamental Zone of
a given symmetry.
:param Symmetry symmetry: an instance of the `Symmetry` class.
:param verbose: flag for verbose mode.
:return: a new Orientation instance which lies in the fundamental zone.
"""
om = symmetry.move_rotation_to_FZ(self.orientation_matrix(), verbose=verbose)
return Orientation(om)
@staticmethod
def misorientation_MacKenzie(psi):
"""Return the fraction of the misorientations corresponding to the
given :math:`\\psi` angle in the reference solution derived by MacKenzie
in his 1958 paper :cite:`MacKenzie_1958`.
:param psi: the misorientation angle in radians.
:returns: the value in the cumulative distribution corresponding to psi.
"""
from math import sqrt, sin, cos, tan, pi, acos
psidg = 180 * psi / pi
if 0 <= psidg <= 45:
p = 2. / 15 * (1 - cos(psi))
elif 45 < psidg <= 60:
p = 2. / 15 * (3 * (sqrt(2) - 1) * sin(psi) - 2 * (1 - cos(psi)))
elif 60 < psidg <= 60.72:
p = 2. / 15 * ((3 * (sqrt(2) - 1) + 4. / sqrt(3)) * sin(psi) - 6. * (1 - cos(psi)))
elif 60.72 < psidg <= 62.8:
X = (sqrt(2) - 1) / (1 - (sqrt(2) - 1) ** 2 / tan(0.5 * psi) ** 2) ** 0.5
Y = (sqrt(2) - 1) ** 2 / ((3 - 1 / tan(0.5 * psi) ** 2) ** 0.5)
p = (2. / 15) * ((3 * (sqrt(2) - 1) + 4 / sqrt(3)) * sin(psi) - 6 * (1 - cos(psi))) \
- 8. / (5 * pi) * (2 * (sqrt(2) - 1) * acos(X / tan(0.5 * psi))
+ 1. / sqrt(3) * acos(Y / tan(0.5 * psi))) \
* sin(psi) + 8. / (5 * pi) * (2 *acos((sqrt(2) + 1) * X / sqrt(2))
+ acos((sqrt(2) + 1) * Y / sqrt(2))) * (1 - cos(psi))
else:
p = 0.
return p
@staticmethod
def misorientation_axis_from_delta(delta):
"""Compute the misorientation axis from the misorientation matrix.
:param delta: The 3x3 misorientation matrix.
:returns: the misorientation axis (normalised vector).
"""
n = np.array([delta[1, 2] - delta[2, 1], delta[2, 0] -
delta[0, 2], delta[0, 1] - delta[1, 0]])
n /= np.sqrt((delta[1, 2] - delta[2, 1]) ** 2 +
(delta[2, 0] - delta[0, 2]) ** 2 +
(delta[0, 1] - delta[1, 0]) ** 2)
return n
def misorientation_axis(self, orientation):
"""Compute the misorientation axis with another crystal orientation.
This vector is by definition common to both crystalline orientations.
:param orientation: an instance of :py:class:`~pymicro.crystal.microstructure.Orientation` class.
:returns: the misorientation axis (normalised vector).
"""
delta = np.dot(self.orientation_matrix(), orientation.orientation_matrix().T)
return Orientation.misorientation_axis_from_delta(delta)
@staticmethod
def misorientation_angle_from_delta(delta):
"""Compute the misorientation angle from the misorientation matrix.
Compute the angle associated with this misorientation matrix :math:`\\Delta g`.
It is defined as :math:`\\omega = \\arccos(\\text{trace}(\\Delta g)/2-1)`.
To avoid float rounding error, the argument is rounded to 1.0 if it is
within 1 and 1 plus the 32 bit floating point precision.
.. note::
This does not account for the crystal symmetries. If you want to
find the disorientation between two orientations, use the
:py:meth:`~pymicro.crystal.microstructure.Orientation.disorientation`
method.
:param delta: The 3x3 misorientation matrix.
:returns float: the misorientation angle in radians.
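
For instance (illustrative), a pure rotation of 10 degrees about Z::

    import numpy as np
    delta = Orientation.Euler2OrientationMatrix((10., 0., 0.))
    angle = Orientation.misorientation_angle_from_delta(delta)
    assert np.isclose(np.degrees(angle), 10.)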
"""
cw = 0.5 * (delta.trace() - 1)
if cw > 1. and cw - 1. < 10 * np.finfo('float32').eps:
# print('cw=%.20f, rounding to 1.' % cw)
cw = 1.
omega = np.arccos(cw)
return omega
def disorientation(self, orientation, crystal_structure=Symmetry.triclinic):
"""Compute the disorientation another crystal orientation.
Considering all the possible crystal symmetries, the disorientation
is defined as the combination of the minimum misorientation angle
and the misorientation axis lying in the fundamental zone, which
can be used to bring the two lattices into coincidence.
.. note::
Both orientations are supposed to have the same symmetry. This is not
necessarily the case in multi-phase materials.
:param orientation: an instance of
:py:class:`~pymicro.crystal.microstructure.Orientation` class
describing the other crystal orientation from which to compute the
angle.
:param crystal_structure: an instance of the `Symmetry` class
describing the crystal symmetry, triclinic (no symmetry) by
default.
:returns tuple: the misorientation angle in radians, the axis as a
numpy vector (crystal coordinates), the axis as a numpy vector
(sample coordinates).
"""
the_angle = np.pi
symmetries = crystal_structure.symmetry_operators()
(gA, gB) = (self.orientation_matrix(), orientation.orientation_matrix()) # nicknames
for (g1, g2) in [(gA, gB), (gB, gA)]:
for j in range(symmetries.shape[0]):
sym_j = symmetries[j]
oj = np.dot(sym_j, g1) # the crystal symmetry operator is left applied
for i in range(symmetries.shape[0]):
sym_i = symmetries[i]
oi = np.dot(sym_i, g2)
delta = np.dot(oi, oj.T)
mis_angle = Orientation.misorientation_angle_from_delta(delta)
if mis_angle < the_angle:
# now compute the misorientation axis, should check if it lies in the fundamental zone
mis_axis = Orientation.misorientation_axis_from_delta(delta)
# here we have np.dot(oi.T, mis_axis) = np.dot(oj.T, mis_axis)
# print(mis_axis, mis_angle*180/np.pi, np.dot(oj.T, mis_axis))
the_angle = mis_angle
the_axis = mis_axis
the_axis_xyz = np.dot(oi.T, the_axis)
return the_angle, the_axis, the_axis_xyz
def phi1(self):
"""Convenience methode to expose the first Euler angle."""
return self.euler[0]
def Phi(self):
"""Convenience methode to expose the second Euler angle."""
return self.euler[1]
def phi2(self):
"""Convenience methode to expose the third Euler angle."""
return self.euler[2]
def compute_XG_angle(self, hkl, omega, verbose=False):
"""Compute the angle between the scattering vector :math:`\mathbf{G_{l}}`
and :math:`\mathbf{-X}` the X-ray unit vector at a given angular position :math:`\\omega`.
A given hkl plane defines the scattering vector :math:`\mathbf{G_{hkl}}` by
the miller indices in the reciprocal space. It is expressed in the
cartesian coordinate system by :math:`\mathbf{B}.\mathbf{G_{hkl}}` and in the
laboratory coordinate system accounting for the crystal orientation
by :math:`\mathbf{g}^{-1}.\mathbf{B}.\mathbf{G_{hkl}}`.
The crystal is assumed to be placed on a rotation stage around the
laboratory vertical axis. The scattering vector can finally be
written as :math:`\mathbf{G_l}=\mathbf{\\Omega}.\mathbf{g}^{-1}.\mathbf{B}.\mathbf{G_{hkl}}`.
The X-rays unit vector is :math:`\mathbf{X}=[1, 0, 0]`. So the computed angle
is :math:`\\alpha=acos(-\mathbf{X}.\mathbf{G_l}/||\mathbf{G_l}||`
The Bragg condition is fulfilled when :math:`\\alpha=\pi/2-\\theta_{Bragg}`
:param hkl: the hkl plane, an instance of :py:class:`~pymicro.crystal.lattice.HklPlane`
:param omega: the angle of rotation of the crystal around the laboratory vertical axis.
:param bool verbose: activate verbose mode (False by default).
:return float: the angle between :math:`-\mathbf{X}` and :math:`\mathbf{G_{l}}` in degrees.
"""
X = np.array([1., 0., 0.])
gt = self.orientation_matrix().transpose()
Gc = hkl.scattering_vector()
Gs = gt.dot(Gc) # in the cartesian sample CS
omegar = omega * np.pi / 180
R = np.array([[np.cos(omegar), -np.sin(omegar), 0],
[np.sin(omegar), np.cos(omegar), 0],
[0, 0, 1]])
Gl = R.dot(Gs)
alpha = np.arccos(np.dot(-X, Gl) / np.linalg.norm(Gl)) * 180 / np.pi
if verbose:
print('scattering vector in the crystal CS', Gc)
print('scattering vector in the sample CS', Gs)
print('scattering vector in the laboratory CS (including Omega rotation)', Gl)
print('angle (deg) between -X and G', alpha)
return alpha
@staticmethod
def solve_trig_equation(A, B, C, verbose=False):
"""Solve the trigonometric equation in the form of:
.. math::
A\cos\\theta + B\sin\\theta = C
:param float A: the A constant in the equation.
:param float B: the B constant in the equation.
:param float C: the C constant in the equation.
:return tuple: the two solution angles in degrees.
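
For instance (illustrative), solving :math:`\\cos\\theta = 0.5` yields 60 and 300 degrees::

    t1, t2 = Orientation.solve_trig_equation(1., 0., 0.5)
    assert sorted([round(t1), round(t2)]) == [60, 300]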
"""
Delta = 4 * (A ** 2 + B ** 2 - C ** 2)
if Delta < 0:
raise ValueError('Delta < 0 (%f)' % Delta)
if verbose:
print('A={0:.3f}, B={1:.3f}, C={2:.3f}, Delta={3:.1f}'.format(A, B, C, Delta))
theta_1 = 2 * np.arctan2(B - 0.5 * np.sqrt(Delta), A + C) * 180. / np.pi % 360
theta_2 = 2 * np.arctan2(B + 0.5 * np.sqrt(Delta), A + C) * 180. / np.pi % 360
return theta_1, theta_2
def dct_omega_angles(self, hkl, lambda_keV, verbose=False):
"""Compute the two omega angles which satisfy the Bragg condition.
For a given crystal orientation sitting on a vertical rotation axis,
there is exactly two :math:`\omega` positions in :math:`[0, 2\pi]` for which
a particular :math:`(hkl)` reflexion will fulfil Bragg's law.
According to the Bragg's law, a crystallographic plane of a given
grain will be in diffracting condition if:
.. math::
\sin\\theta=-[\mathbf{\Omega}.\mathbf{g}^{-1}\mathbf{G_c}]_1
with :math:`\mathbf{\Omega}` the matrix associated with the rotation
axis:
.. math::
\mathbf{\Omega}=\\begin{pmatrix}
\cos\omega & -\sin\omega & 0 \\\\
\sin\omega & \cos\omega & 0 \\\\
0 & 0 & 1 \\\\
\end{pmatrix}
This method solves the associated second order equation to return
the two corresponding omega angles.
:param hkl: The given cristallographic plane :py:class:`~pymicro.crystal.lattice.HklPlane`
:param float lambda_keV: The X-rays energy expressed in keV
:param bool verbose: Verbose mode (False by default)
:returns tuple: :math:`(\omega_1, \omega_2)` the two values of the \
rotation angle around the vertical axis (in degrees).
"""
(h, k, l) = hkl.miller_indices()
theta = hkl.bragg_angle(lambda_keV, verbose=verbose)
lambda_nm = 1.2398 / lambda_keV
gt = self.orientation_matrix().T # gt = g^{-1} in Poulsen 2004
Gc = hkl.scattering_vector()
A = np.dot(Gc, gt[0])
B = - np.dot(Gc, gt[1])
# A = h / a * gt[0, 0] + k / b * gt[0, 1] + l / c * gt[0, 2]
# B = -h / a * gt[1, 0] - k / b * gt[1, 1] - l / c * gt[1, 2]
C = -2 * np.sin(theta) ** 2 / lambda_nm # the minus sign comes from the main equation
omega_1, omega_2 = Orientation.solve_trig_equation(A, B, C, verbose=verbose)
if verbose:
print('the two omega values in degrees fulfilling the Bragg condition are (%.1f, %.1f)' % (
omega_1, omega_2))
return omega_1, omega_2
def rotating_crystal(self, hkl, lambda_keV, omega_step=0.5, display=True, verbose=False):
from pymicro.xray.xray_utils import lambda_keV_to_nm
lambda_nm = lambda_keV_to_nm(lambda_keV)
X = np.array([1., 0., 0.]) / lambda_nm
print('magnitude of X', np.linalg.norm(X))
gt = self.orientation_matrix().transpose()
(h, k, l) = hkl.miller_indices()
theta = hkl.bragg_angle(lambda_keV) * 180. / np.pi
print('bragg angle for %d%d%d reflection is %.1f' % (h, k, l, theta))
Gc = hkl.scattering_vector()
Gs = gt.dot(Gc)
alphas = []
twothetas = []
magnitude_K = []
omegas = np.linspace(0.0, 360.0, num=int(360.0 / omega_step), endpoint=False)
for omega in omegas:
print('\n** COMPUTING AT OMEGA=%03.1f deg' % omega)
# prepare rotation matrix
omegar = omega * np.pi / 180
R = np.array([[np.cos(omegar), -np.sin(omegar), 0],
[np.sin(omegar), np.cos(omegar), 0],
[0, 0, 1]])
# R = R.dot(Rlt).dot(Rut) # with tilts
Gl = R.dot(Gs)
print('scattering vector in laboratory CS', Gl)
n = R.dot(gt.dot(hkl.normal()))
print('plane normal:', hkl.normal())
print(R)
print('rotated plane normal:', n, ' with a norm of', np.linalg.norm(n))
G = n / hkl.interplanar_spacing() # here G == N
print('G vector:', G, ' with a norm of', np.linalg.norm(G))
K = X + G
print('X + G vector', K)
magnitude_K.append(np.linalg.norm(K))
print('magnitude of K', np.linalg.norm(K))
alpha = np.arccos(np.dot(-X, G) / (np.linalg.norm(-X) * np.linalg.norm(G))) * 180 / np.pi
print('angle between -X and G', alpha)
alphas.append(alpha)
twotheta = np.arccos(np.dot(K, X) / (np.linalg.norm(K) * np.linalg.norm(X))) * 180 / np.pi
print('angle (deg) between K and X', twotheta)
twothetas.append(twotheta)
print('min alpha angle is ', min(alphas))
# compute omega_1 and omega_2 to verify graphically
(w1, w2) = self.dct_omega_angles(hkl, lambda_keV, verbose=False)
# gather the results in a single figure
fig = plt.figure(figsize=(12, 10))
fig.add_subplot(311)
plt.title('Looking for (%d%d%d) Bragg reflexions' % (h, k, l))
plt.plot(omegas, alphas, 'k-')
plt.xlim(0, 360)
plt.ylim(0, 180)
plt.xticks(np.arange(0, 390, 30))
# add bragg condition
plt.axhline(90 - theta, xmin=0, xmax=360, linewidth=2)
plt.annotate('$\pi/2-\\theta_{Bragg}$', xycoords='data', xy=(360, 90 - theta), horizontalalignment='left',
verticalalignment='center', fontsize=16)
# add omega solutions
plt.axvline(w1 + 180, ymin=0, ymax=180, linewidth=2, linestyle='dashed', color='gray')
plt.axvline(w2 + 180, ymin=0, ymax=180, linewidth=2, linestyle='dashed', color='gray')
plt.annotate('$\\omega_1$', xycoords='data', xy=(w1 + 180, 0), horizontalalignment='center',
verticalalignment='bottom', fontsize=16)
plt.annotate('$\\omega_2$', xycoords='data', xy=(w2 + 180, 0), horizontalalignment='center',
verticalalignment='bottom', fontsize=16)
plt.ylabel(r'Angle between $-X$ and $\mathbf{G}$')
fig.add_subplot(312)
plt.plot(omegas, twothetas, 'k-')
plt.xlim(0, 360)
# plt.ylim(0,180)
plt.xticks(np.arange(0, 390, 30))
plt.axhline(2 * theta, xmin=0, xmax=360, linewidth=2)
plt.annotate('$2\\theta_{Bragg}$', xycoords='data', xy=(360, 2 * theta), horizontalalignment='left',
verticalalignment='center', fontsize=16)
plt.axvline(w1 + 180, linewidth=2, linestyle='dashed', color='gray')
plt.axvline(w2 + 180, linewidth=2, linestyle='dashed', color='gray')
plt.ylabel('Angle between $X$ and $K$')
fig.add_subplot(313)
plt.plot(omegas, magnitude_K, 'k-')
plt.xlim(0, 360)
plt.axhline(np.linalg.norm(X), xmin=0, xmax=360, linewidth=2)
plt.annotate('$1/\\lambda$', xycoords='data', xy=(360, 1 / lambda_nm), horizontalalignment='left',
verticalalignment='center', fontsize=16)
plt.axvline(w1 + 180, linewidth=2, linestyle='dashed', color='gray')
plt.axvline(w2 + 180, linewidth=2, linestyle='dashed', color='gray')
plt.xlabel(r'Angle of rotation $\omega$')
plt.ylabel(r'Magnitude of $X+G$ (nm$^{-1}$)')
plt.subplots_adjust(top=0.925, bottom=0.05, left=0.1, right=0.9)
if display:
plt.show()
else:
plt.savefig('rotating_crystal_plot_%d%d%d.pdf' % (h, k, l))
@staticmethod
def compute_instrument_transformation_matrix(rx_offset, ry_offset, rz_offset):
"""Compute instrument transformation matrix for given rotation offset.
This function compute a 3x3 rotation matrix (passive convention) that
transforms the sample coordinate system by rotating around the 3
cartesian axes in this order: rotation around X is applied first,
then around Y and finally around Z.
A sample vector :math:`V_s` is consequently transformed into
:math:`V'_s` as:
.. math::
V'_s = T^T.V_s
:param double rx_offset: value to apply for the rotation around X.
:param double ry_offset: value to apply for the rotation around Y.
:param double rz_offset: value to apply for the rotation around Z.
:return: a 3x3 rotation matrix describing the transformation applied
by the diffractometer.
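
For instance (illustrative), a single 90 degree offset around Z::

    import numpy as np
    T = Orientation.compute_instrument_transformation_matrix(0., 0., 90.)
    assert np.allclose(np.dot(T.T, [1., 0., 0.]), [0., -1., 0.])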
"""
angle_zr = np.radians(rz_offset)
angle_yr = np.radians(ry_offset)
angle_xr = np.radians(rx_offset)
Rz = np.array([[np.cos(angle_zr), -np.sin(angle_zr), 0],
[np.sin(angle_zr), np.cos(angle_zr), 0],
[0, 0, 1]])
Ry = np.array([[np.cos(angle_yr), 0, np.sin(angle_yr)],
[0, 1, 0],
[-np.sin(angle_yr), 0, np.cos(angle_yr)]])
Rx = np.array([[1, 0, 0],
[0, np.cos(angle_xr), -np.sin(angle_xr)],
[0, np.sin(angle_xr), np.cos(angle_xr)]])
T = Rz.dot(np.dot(Ry, Rx))
return T
def topotomo_tilts(self, hkl, T=None, verbose=False):
"""Compute the tilts for topotomography alignment.
:param hkl: the hkl plane, an instance of
:py:class:`~pymicro.crystal.lattice.HklPlane`
:param ndarray T: transformation matrix representing the diffractometer
direction at omega=0.
:param bool verbose: activate verbose mode (False by default).
:returns tuple: (ut, lt) the two values of tilts to apply (in radians).
"""
if T is None:
T = np.eye(3)  # identity by default
gt = self.orientation_matrix().transpose()
Gc = hkl.scattering_vector()
Gs = gt.dot(Gc) # in the cartesian sample CS
# apply instrument specific settings
Gs = np.dot(T.T, Gs)
# find topotomo tilts
ut = np.arctan(Gs[1] / Gs[2])
lt = np.arctan(-Gs[0] / (Gs[1] * np.sin(ut) + Gs[2] * np.cos(ut)))
if verbose:
print('up tilt (samrx) should be %.3f' % (ut * 180 / np.pi))
print('low tilt (samry) should be %.3f' % (lt * 180 / np.pi))
return ut, lt
@staticmethod
def from_euler(euler, convention='Bunge'):
"""Rotation matrix from Euler angles.
This is the classical method to obtain an orientation matrix by 3
successive rotations. The result depends on the convention used
(how the successive rotation axes are chosen). In the Bunge convention,
the first rotation is around Z, the second around the new X and the
third one around the new Z. In the Roe convention, the second one
is around Y.
"""
if convention == 'Roe':
(phi1, phi, phi2) = (euler[0] + 90, euler[1], euler[2] - 90)
else:
(phi1, phi, phi2) = euler
g = Orientation.Euler2OrientationMatrix((phi1, phi, phi2))
o = Orientation(g)
return o
@staticmethod
def from_rodrigues(rod):
g = Orientation.Rodrigues2OrientationMatrix(rod)
o = Orientation(g)
return o
@staticmethod
def from_Quaternion(q):
g = Orientation.Quaternion2OrientationMatrix(q)
o = Orientation(g)
return o
@staticmethod
def Zrot2OrientationMatrix(x1=None, x2=None, x3=None):
"""Compute the orientation matrix from the rotated coordinates given
in the .inp file for Zebulon's computations.
The function needs two of the three base vectors, the third one is
computed using a cross product.
.. note::
Still need some tests to validate this function.
:param x1: the first basis vector.
:param x2: the second basis vector.
:param x3: the third basis vector.
:return: the corresponding 3x3 orientation matrix.
"""
if x1 is None and x2 is None:
raise NameError('Need at least two vectors to compute the matrix')
elif x1 is None and x3 is None:
raise NameError('Need at least two vectors to compute the matrix')
elif x3 is None and x2 is None:
raise NameError('Need at least two vectors to compute the matrix')
if x1 is None:
x1 = np.cross(x2, x3)
elif x2 is None:
x2 = np.cross(x3, x1)
elif x3 is None:
x3 = np.cross(x1, x2)
x1 = x1 / np.linalg.norm(x1)
x2 = x2 / np.linalg.norm(x2)
x3 = x3 / np.linalg.norm(x3)
g = np.array([x1, x2, x3]).transpose()
return g
@staticmethod
def OrientationMatrix2EulerSF(g):
"""
Compute the Euler angles (in degrees) from the orientation matrix
in a similar way as done in Mandel_crystal.c
"""
tol = 0.1
r = np.zeros(9, dtype=np.float64) # double precision here
# Z-set order for tensor is 11 22 33 12 23 13 21 32 31
r[0] = g[0, 0]
r[1] = g[1, 1]
r[2] = g[2, 2]
r[3] = g[0, 1]
r[4] = g[1, 2]
r[5] = g[0, 2]
r[6] = g[1, 0]
r[7] = g[2, 1]
r[8] = g[2, 0]
phi = np.arccos(r[2])
if phi == 0.:
phi2 = 0.
phi1 = np.arcsin(r[6])
if abs(np.cos(phi1) - r[0]) > tol:
phi1 = np.pi - phi1
else:
x2 = r[5] / np.sin(phi)
x1 = r[8] / np.sin(phi)
if x1 > 1.:
x1 = 1.
if x2 > 1.:
x2 = 1.
if x1 < -1.:
x1 = -1.
if x2 < -1.:
x2 = -1.
phi2 = np.arcsin(x2)
phi1 = np.arcsin(x1)
if abs(np.cos(phi2) * np.sin(phi) - r[7]) > tol:
phi2 = np.pi - phi2
if abs(np.cos(phi1) * np.sin(phi) + r[4]) > tol:
phi1 = np.pi - phi1
return np.degrees(np.array([phi1, phi, phi2]))
@staticmethod
def OrientationMatrix2Euler(g):
"""
Compute the Euler angles from the orientation matrix.
This conversion follows the paper of Rowenhorst et al. :cite:`Rowenhorst2015`.
In particular when :math:`g_{33} = 1` within the machine precision,
there is no way to determine the values of :math:`\phi_1` and :math:`\phi_2`
(only their sum is defined). The convention is to attribute
the entire angle to :math:`\phi_1` and set :math:`\phi_2` to zero.
:param g: The 3x3 orientation matrix
:return: The 3 euler angles in degrees.
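
A quick round-trip check (illustrative)::

    import numpy as np
    g = Orientation.Euler2OrientationMatrix((10., 20., 30.))
    assert np.allclose(Orientation.OrientationMatrix2Euler(g), [10., 20., 30.])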
"""
eps = np.finfo('float').eps
(phi1, Phi, phi2) = (0.0, 0.0, 0.0)
# treat special case where g[2, 2] = 1
if np.abs(g[2, 2]) >= 1 - eps:
if g[2, 2] > 0.0:
phi1 = np.arctan2(g[0][1], g[0][0])
else:
phi1 = -np.arctan2(-g[0][1], g[0][0])
Phi = np.pi
else:
Phi = np.arccos(g[2][2])
zeta = 1.0 / np.sqrt(1.0 - g[2][2] ** 2)
phi1 = np.arctan2(g[2][0] * zeta, -g[2][1] * zeta)
phi2 = np.arctan2(g[0][2] * zeta, g[1][2] * zeta)
# ensure angles are in the range [0, 2*pi]
if phi1 < 0.0:
phi1 += 2 * np.pi
if Phi < 0.0:
Phi += 2 * np.pi
if phi2 < 0.0:
phi2 += 2 * np.pi
return np.degrees([phi1, Phi, phi2])
@staticmethod
def OrientationMatrix2Rodrigues(g):
"""
Compute the rodrigues vector from the orientation matrix.
:param g: The 3x3 orientation matrix representing the rotation.
:returns: The Rodrigues vector as a 3 components array.
"""
t = g.trace() + 1
if np.abs(t) < np.finfo(g.dtype).eps:
print('warning, returning [0., 0., 0.], consider using axis, angle '
'representation instead')
return np.zeros(3)
else:
r1 = (g[1, 2] - g[2, 1]) / t
r2 = (g[2, 0] - g[0, 2]) / t
r3 = (g[0, 1] - g[1, 0]) / t
return np.array([r1, r2, r3])
@staticmethod
def OrientationMatrix2Quaternion(g, P=1):
q0 = 0.5 * np.sqrt(1 + g[0, 0] + g[1, 1] + g[2, 2])
q1 = P * 0.5 * np.sqrt(1 + g[0, 0] - g[1, 1] - g[2, 2])
q2 = P * 0.5 * np.sqrt(1 - g[0, 0] + g[1, 1] - g[2, 2])
q3 = P * 0.5 * np.sqrt(1 - g[0, 0] - g[1, 1] + g[2, 2])
# the three sign checks are independent (Rowenhorst et al. convention),
# so each component is tested with its own if statement
if g[2, 1] < g[1, 2]:
    q1 = q1 * -1
if g[0, 2] < g[2, 0]:
    q2 = q2 * -1
if g[1, 0] < g[0, 1]:
    q3 = q3 * -1
q = Quaternion(np.array([q0, q1, q2, q3]), convention=P)
return q.quat
@staticmethod
def Rodrigues2OrientationMatrix(rod):
"""
Compute the orientation matrix from the Rodrigues vector.
:param rod: The Rodrigues vector as a 3 components array.
:returns: The 3x3 orientation matrix representing the rotation.
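
Round-trip sketch (illustrative)::

    import numpy as np
    rod = np.array([0.1, -0.2, 0.05])
    g = Orientation.Rodrigues2OrientationMatrix(rod)
    assert np.allclose(Orientation.OrientationMatrix2Rodrigues(g), rod)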
"""
r = np.linalg.norm(rod)
I = np.diagflat(np.ones(3))
if r < np.finfo(r.dtype).eps:
# the rodrigues vector is zero, return the identity matrix
return I
theta = 2 * np.arctan(r)
n = rod / r
omega = np.array([[0.0, n[2], -n[1]],
[-n[2], 0.0, n[0]],
[n[1], -n[0], 0.0]])
g = I + np.sin(theta) * omega + (1 - np.cos(theta)) * omega.dot(omega)
return g
@staticmethod
def Rodrigues2Axis(rod):
"""
Compute the axis/angle representation from the Rodrigues vector.
:param rod: The Rodrigues vector as a 3 components array.
:returns: A tuple in the (axis, angle) form.
"""
r = np.linalg.norm(rod)
axis = rod / r
angle = 2 * np.arctan(r)
return axis, angle
@staticmethod
def Axis2OrientationMatrix(axis, angle):
"""
Compute the (passive) orientation matrix associated the rotation defined by the given (axis, angle) pair.
:param axis: the rotation axis.
:param angle: the rotation angle (degrees).
:returns: the 3x3 orientation matrix.
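
For instance (illustrative), a 30 degree rotation about Z matches the
single-angle Euler form::

    import numpy as np
    g = Orientation.Axis2OrientationMatrix([0., 0., 1.], 30.)
    assert np.allclose(g, Orientation.Euler2OrientationMatrix((30., 0., 0.)))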
"""
omega = np.radians(angle)
c = np.cos(omega)
s = np.sin(omega)
g = np.array([[c + (1 - c) * axis[0] ** 2,
(1 - c) * axis[0] * axis[1] + s * axis[2],
(1 - c) * axis[0] * axis[2] - s * axis[1]],
[(1 - c) * axis[0] * axis[1] - s * axis[2],
c + (1 - c) * axis[1] ** 2,
(1 - c) * axis[1] * axis[2] + s * axis[0]],
[(1 - c) * axis[0] * axis[2] + s * axis[1],
(1 - c) * axis[1] * axis[2] - s * axis[0],
c + (1 - c) * axis[2] ** 2]])
return g
@staticmethod
def Euler2Axis(euler):
"""Compute the (axis, angle) representation associated to this (passive)
rotation expressed by the Euler angles.
:param euler: 3 euler angles (in degrees).
:returns: a tuple containing the axis (a vector) and the angle (in radians).
"""
(phi1, Phi, phi2) = np.radians(euler)
t = np.tan(0.5 * Phi)
s = 0.5 * (phi1 + phi2)
d = 0.5 * (phi1 - phi2)
tau = np.sqrt(t ** 2 + np.sin(s) ** 2)
alpha = 2 * np.arctan2(tau, np.cos(s))
if alpha > np.pi:
axis = np.array([-t / tau * np.cos(d), -t / tau * np.sin(d), -1 / tau * np.sin(s)])
angle = 2 * np.pi - alpha
else:
axis = np.array([t / tau * np.cos(d), t / tau * np.sin(d), 1 / tau * np.sin(s)])
angle = alpha
return axis, angle
@staticmethod
def Euler2Quaternion(euler, P=1):
"""Compute the quaternion from the 3 euler angles (in degrees).
:param tuple euler: the 3 euler angles in degrees.
:param int P: +1 to compute an active quaternion (default), -1 for a passive quaternion.
:return: a `Quaternion` instance representing the rotation.
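
Round-trip sketch (illustrative)::

    import numpy as np
    q = Orientation.Euler2Quaternion((10., 20., 30.), P=1)
    assert np.allclose(Orientation.Quaternion2Euler(q), [10., 20., 30.])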
"""
(phi1, Phi, phi2) = np.radians(euler)
q0 = np.cos(0.5 * (phi1 + phi2)) * np.cos(0.5 * Phi)
q1 = np.cos(0.5 * (phi1 - phi2)) * np.sin(0.5 * Phi)
q2 = np.sin(0.5 * (phi1 - phi2)) * np.sin(0.5 * Phi)
q3 = np.sin(0.5 * (phi1 + phi2)) * np.cos(0.5 * Phi)
q = Quaternion(np.array([q0, -P * q1, -P * q2, -P * q3]), convention=P)
if q0 < 0:
# the scalar part must be positive
q.quat = q.quat * -1
return q
@staticmethod
def Euler2Rodrigues(euler):
"""Compute the rodrigues vector from the 3 euler angles (in degrees).
:param euler: the 3 Euler angles (in degrees).
:return: the rodrigues vector as a 3 components numpy array.
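
Consistency sketch (illustrative)::

    import numpy as np
    rod = Orientation.Euler2Rodrigues((10., 20., 30.))
    g = Orientation.Euler2OrientationMatrix((10., 20., 30.))
    assert np.allclose(rod, Orientation.OrientationMatrix2Rodrigues(g))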
"""
(phi1, Phi, phi2) = np.radians(euler)
a = 0.5 * (phi1 - phi2)
b = 0.5 * (phi1 + phi2)
r1 = np.tan(0.5 * Phi) * np.cos(a) / np.cos(b)
r2 = np.tan(0.5 * Phi) * np.sin(a) / np.cos(b)
r3 = np.tan(b)
return np.array([r1, r2, r3])
@staticmethod
def eu2ro(euler):
"""Transform a series of euler angles into rodrigues vectors.
:param ndarray euler: the (n, 3) shaped array of Euler angles (radians).
:returns: a (n, 3) array with the rodrigues vectors.
"""
if euler.ndim != 2 or euler.shape[1] != 3:
raise ValueError('Wrong shape for the euler array: %s -> should be (n, 3)' % euler.shape)
phi1, Phi, phi2 = np.squeeze(np.split(euler, 3, axis=1))
a = 0.5 * (phi1 - phi2)
b = 0.5 * (phi1 + phi2)
r1 = np.tan(0.5 * Phi) * np.cos(a) / np.cos(b)
r2 = np.tan(0.5 * Phi) * np.sin(a) / np.cos(b)
r3 = np.tan(b)
return np.array([r1, r2, r3]).T
@staticmethod
def Euler2OrientationMatrix(euler):
"""Compute the orientation matrix :math:`\mathbf{g}` associated with
the 3 Euler angles :math:`(\phi_1, \Phi, \phi_2)`.
The matrix is calculated via (see the `euler_angles` recipe in the
cookbook for a detailed example):
.. math::
\mathbf{g}=\\begin{pmatrix}
\cos\phi_1\cos\phi_2 - \sin\phi_1\sin\phi_2\cos\Phi &
\sin\phi_1\cos\phi_2 + \cos\phi_1\sin\phi_2\cos\Phi &
\sin\phi_2\sin\Phi \\\\
-\cos\phi_1\sin\phi_2 - \sin\phi_1\cos\phi_2\cos\Phi &
-\sin\phi_1\sin\phi_2 + \cos\phi_1\cos\phi_2\cos\Phi &
\cos\phi_2\sin\Phi \\\\
\sin\phi_1\sin\Phi & -\cos\phi_1\sin\Phi & \cos\Phi \\\\
\end{pmatrix}
:param euler: The triplet of the Euler angles (in degrees).
:return g: The 3x3 orientation matrix.
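
The result is a proper rotation matrix (illustrative check)::

    import numpy as np
    g = Orientation.Euler2OrientationMatrix((10., 20., 30.))
    assert np.allclose(np.dot(g, g.T), np.eye(3))
    assert np.isclose(np.linalg.det(g), 1.)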
"""
(rphi1, rPhi, rphi2) = np.radians(euler)
c1 = np.cos(rphi1)
s1 = np.sin(rphi1)
c = np.cos(rPhi)
s = np.sin(rPhi)
c2 = np.cos(rphi2)
s2 = np.sin(rphi2)
# rotation matrix g
g11 = c1 * c2 - s1 * s2 * c
g12 = s1 * c2 + c1 * s2 * c
g13 = s2 * s
g21 = -c1 * s2 - s1 * c2 * c
g22 = -s1 * s2 + c1 * c2 * c
g23 = c2 * s
g31 = s1 * s
g32 = -c1 * s
g33 = c
g = np.array([[g11, g12, g13], [g21, g22, g23], [g31, g32, g33]])
return g
@staticmethod
def Quaternion2Euler(q):
"""
Compute Euler angles from a Quaternion
:param q: Quaternion
:return: Euler angles (in degrees, Bunge convention)
"""
P = q.convention
(q0, q1, q2, q3) = q.quat
q03 = q0 ** 2 + q3 ** 2
q12 = q1 ** 2 + q2 ** 2
chi = np.sqrt(q03 * q12)
if chi == 0.:
if q12 == 0.:
phi_1 = atan2(-2 * P * q0 * q3, q0 ** 2 - q3 ** 2)
Phi = 0.
else:
phi_1 = atan2(-2 * q1 * q2, q1 ** 2 - q2 ** 2)
Phi = pi
phi_2 = 0.
else:
phi_1 = atan2((q1 * q3 - P * q0 * q2) / chi,
(-P * q0 * q1 - q2 * q3) / chi)
Phi = atan2(2 * chi, q03 - q12)
phi_2 = atan2((P * q0 * q2 + q1 * q3) / chi,
(q2 * q3 - P * q0 * q1) / chi)
return np.degrees([phi_1, Phi, phi_2])
@staticmethod
def Quaternion2OrientationMatrix(q):
P = q.convention
(q0, q1, q2, q3) = q.quat
qbar = q0 ** 2 - q1 ** 2 - q2 ** 2 - q3 ** 2
g = np.array([[qbar + 2 * q1 ** 2, 2 * (q1 * q2 - P * q0 * q3), 2 * (q1 * q3 + P * q0 * q2)],
[2 * (q1 * q2 + P * q0 * q3), qbar + 2 * q2 ** 2, 2 * (q2 * q3 - P * q0 * q1)],
[2 * (q1 * q3 - P * q0 * q2), 2 * (q2 * q3 + P * q0 * q1), qbar + 2 * q3 ** 2]])
return g
@staticmethod
def read_euler_txt(txt_path):
"""
Read a set of euler angles from an ascii file.
This method is deprecated, please use `read_orientations`.
:param str txt_path: path to the text file containing the euler angles.
:returns dict: a dictionary with the line number and the corresponding
orientation.
"""
return Orientation.read_orientations(txt_path)
@staticmethod
def read_orientations(txt_path, data_type='euler', **kwargs):
"""
Read a set of grain orientations from a text file.
The text file must be organised in 3 columns (the other are ignored),
corresponding to either the three euler angles or the three rodrigues
vector components, depending on the data_type). Internally the ascii
file is read by the genfromtxt function of numpy, to which additional
keyworks (such as the delimiter) can be passed to via the kwargs
dictionnary.
:param str txt_path: path to the text file containing the orientations.
:param str data_type: 'euler' (default) or 'rodrigues'.
:param dict kwargs: additional parameters passed to genfromtxt.
:returns dict: a dictionary with the line number and the corresponding
orientation.
"""
data = np.genfromtxt(txt_path, **kwargs)
size = len(data)
orientations = []
for i in range(size):
angles = np.array([float(data[i, 0]), float(data[i, 1]), float(data[i, 2])])
if data_type == 'euler':
orientations.append([i + 1, Orientation.from_euler(angles)])
elif data_type == 'rodrigues':
orientations.append([i + 1, Orientation.from_rodrigues(angles)])
return dict(orientations)
@staticmethod
def read_euler_from_zset_inp(inp_path):
"""Read a set of grain orientations from a z-set input file.
In z-set input files, the orientation data may be specified
either using the rotation of two vector, euler angles or
rodrigues components directly. For instance the following
lines are extracted from a polycrystalline calculation file
using the rotation keyword:
::
**elset elset1 *file au.mat *integration theta_method_a 1.0 1.e-9 150
*rotation x1 0.438886 -1.028805 0.197933 x3 1.038339 0.893172 1.003888
**elset elset2 *file au.mat *integration theta_method_a 1.0 1.e-9 150
*rotation x1 0.178825 -0.716937 1.043300 x3 0.954345 0.879145 1.153101
**elset elset3 *file au.mat *integration theta_method_a 1.0 1.e-9 150
*rotation x1 -0.540479 -0.827319 1.534062 x3 1.261700 1.284318 1.004174
**elset elset4 *file au.mat *integration theta_method_a 1.0 1.e-9 150
*rotation x1 -0.941278 0.700996 0.034552 x3 1.000816 1.006824 0.885212
**elset elset5 *file au.mat *integration theta_method_a 1.0 1.e-9 150
*rotation x1 -2.383786 0.479058 -0.488336 x3 0.899545 0.806075 0.984268
:param str inp_path: the path to the ascii file to read.
:returns dict: a dictionary of the orientations associated with the
elset names.
"""
inp = open(inp_path)
lines = inp.readlines()
for i, line in enumerate(lines):
if line.lstrip().startswith('***material'):
break
euler_lines = []
for j, line in enumerate(lines[i + 1:]):
# read until next *** block
if line.lstrip().startswith('***'):
break
if not line.lstrip().startswith('%') and line.find('**elset') >= 0:
euler_lines.append(line)
euler = []
for l in euler_lines:
tokens = l.split()
elset = tokens[tokens.index('**elset') + 1]
irot = tokens.index('*rotation')
if tokens[irot + 1] == 'x1':
x1 = np.empty(3, dtype=float)
x1[0] = float(tokens[irot + 2])
x1[1] = float(tokens[irot + 3])
x1[2] = float(tokens[irot + 4])
x3 = np.empty(3, dtype=float)
x3[0] = float(tokens[irot + 6])
x3[1] = float(tokens[irot + 7])
x3[2] = float(tokens[irot + 8])
euler.append([elset,
Orientation.Zrot2OrientationMatrix(x1=x1, x3=x3)])
else: # euler angles
phi1 = tokens[irot + 1]
Phi = tokens[irot + 2]
phi2 = tokens[irot + 3]
angles = np.array([float(phi1), float(Phi), float(phi2)])
euler.append([elset, Orientation.from_euler(angles)])
return dict(euler)
def slip_system_orientation_tensor(self, s):
"""Compute the orientation strain tensor m^s for this
:py:class:`~pymicro.crystal.microstructure.Orientation` and the given
slip system.
:param s: an instance of :py:class:`~pymicro.crystal.lattice.SlipSystem`
.. math::
M^s_{ij} = l^s_i.n^s_j
"""
gt = self.orientation_matrix().transpose()
plane = s.get_slip_plane()
n_rot = np.dot(gt, plane.normal())
slip = s.get_slip_direction()
l_rot = np.dot(gt, slip.direction())
return np.outer(l_rot, n_rot)
def slip_system_orientation_strain_tensor(self, s):
"""Compute the orientation strain tensor m^s for this
:py:class:`~pymicro.crystal.microstructure.Orientation` and the given
slip system.
:param s: an instance of :py:class:`~pymicro.crystal.lattice.SlipSystem`
.. math::
m^s_{ij} = \\frac{1}{2}(l^s_i.n^s_j + l^s_j.n^s_i)
"""
gt = self.orientation_matrix().transpose()
plane = s.get_slip_plane()
n_rot = np.dot(gt, plane.normal())
slip = s.get_slip_direction()
l_rot = np.dot(gt, slip.direction())
m = 0.5 * (np.outer(l_rot, n_rot) + np.outer(n_rot, l_rot))
return m
def slip_system_orientation_rotation_tensor(self, s):
"""Compute the orientation rotation tensor q^s for this
:py:class:`~pymicro.crystal.microstructure.Orientation` and the given
slip system.
:param s: an instance of :py:class:`~pymicro.crystal.lattice.SlipSystem`
.. math::
q^s_{ij} = \\frac{1}{2}(l^s_i.n^s_j - l^s_j.n^s_i)
"""
gt = self.orientation_matrix().transpose()
plane = s.get_slip_plane()
n_rot = np.dot(gt, plane.normal())
slip = s.get_slip_direction()
l_rot = np.dot(gt, slip.direction())
q = 0.5 * (np.outer(l_rot, n_rot) - np.outer(n_rot, l_rot))
return q
def schmid_factor(self, slip_system, load_direction=[0., 0., 1]):
"""Compute the Schmid factor for this crystal orientation and the
given slip system.
:param slip_system: a `SlipSystem` instance.
:param load_direction: a unit vector describing the loading direction
(default: vertical axis [0, 0, 1]).
:return float: a number between 0 and 0.5.
"""
plane = slip_system.get_slip_plane()
gt = self.orientation_matrix().transpose()
n_rot = np.dot(gt, plane.normal()) # plane.normal() is a unit vector
slip = slip_system.get_slip_direction().direction()
slip_rot = np.dot(gt, slip)
schmid_factor = np.abs(np.dot(n_rot, load_direction) *
np.dot(slip_rot, load_direction))
return schmid_factor
def compute_all_schmid_factors(self, slip_systems,
load_direction=[0., 0., 1], verbose=False):
"""Compute all Schmid factors for this crystal orientation and the
given list of slip systems.
:param slip_systems: a list of the slip systems from which to compute
the Schmid factor values.
:param load_direction: a unit vector describing the loading direction
(default: vertical axis [0, 0, 1]).
:param bool verbose: activate verbose mode.
:return list: a list of the schmid factors.
"""
schmid_factor_list = []
for ss in slip_systems:
sf = self.schmid_factor(ss, load_direction)
if verbose:
print('Slip system: %s, Schmid factor is %.3f' % (ss, sf))
schmid_factor_list.append(sf)
return schmid_factor_list
@staticmethod
def compute_m_factor(o1, ss1, o2, ss2):
"""Compute the m factor with another slip system.
:param Orientation o1: the orientation the first grain.
:param SlipSystem ss1: the slip system in the first grain.
:param Orientation o2: the orientation the second grain.
:param SlipSystem ss2: the slip system in the second grain.
:returns: the m factor as a float number < 1
"""
# orientation matrices
gt1 = o1.orientation_matrix().T
gt2 = o2.orientation_matrix().T
# slip plane normal in sample local frame
n1 = np.dot(gt1, ss1.get_slip_plane().normal())
n2 = np.dot(gt2, ss2.get_slip_plane().normal())
# slip direction in sample local frame
l1 = np.dot(gt1, ss1.get_slip_direction().direction())
l2 = np.dot(gt2, ss2.get_slip_direction().direction())
# m factor calculation
m = abs(np.dot(n1, n2) * np.dot(l1, l2))
return m
class Grain:
"""
Class defining a crystallographic grain.
A grain has a constant crystallographic `Orientation` and a grain id. The
center attribute is the center of mass of the grain in world coordinates.
The volume of the grain is expressed in pixel/voxel unit.
"""
def __init__(self, grain_id, grain_orientation):
self.id = grain_id
self.orientation = grain_orientation
self.center = np.array([0., 0., 0.])
self.volume = 0
self.vtkmesh = None
self.hkl_planes = []
def __repr__(self):
"""Provide a string representation of the class."""
s = '%s\n * id = %d\n' % (self.__class__.__name__, self.id)
s += ' * %s\n' % (self.orientation)
s += ' * center %s\n' % np.array_str(self.center)
s += ' * has vtk mesh ? %s\n' % (self.vtkmesh != None)
return s
def get_volume(self):
return self.volume
def get_volume_fraction(self, total_volume=None):
"""Compute the grain volume fraction.
:param float total_volume: the total volume value to use.
:return float: the grain volume fraction as a number in the range [0, 1].
"""
if not total_volume:
return 1.
else:
return self.volume / total_volume
def schmid_factor(self, slip_system, load_direction=[0., 0., 1]):
"""Compute the Schmid factor of this grain for the given slip system
and loading direction.
:param slip_system: a `SlipSystem` instance.
:param load_direction: a unit vector describing the loading direction
(default: vertical axis [0, 0, 1]).
        :return float: a number between 0 and 0.5.
"""
return self.orientation.schmid_factor(slip_system, load_direction)
def SetVtkMesh(self, mesh):
"""Set the VTK mesh of this grain.
:param mesh: the grain mesh in VTK format.
"""
self.vtkmesh = mesh
def add_vtk_mesh(self, array, contour=True, verbose=False):
"""Add a mesh to this grain.
        This method processes a labeled array to extract the geometry of the
grain. The grain shape is defined by the pixels with a value of the
grain id. A vtkUniformGrid object is created and thresholded or
contoured depending on the value of the flag `contour`. The resulting
mesh is returned, centered on the center of mass of the grain.
:param ndarray array: a numpy array from which to extract the grain shape.
:param bool contour: a flag to use contour mode for the shape.
:param bool verbose: activate verbose mode.
"""
label = self.id # we use the grain id here...
# create vtk structure
from scipy import ndimage
from vtk.util import numpy_support
grain_size = np.shape(array)
array_bin = (array == label).astype(np.uint8)
local_com = ndimage.measurements.center_of_mass(array_bin, array)
vtk_data_array = numpy_support.numpy_to_vtk(np.ravel(array_bin, order='F'), deep=1)
grid = vtk.vtkUniformGrid()
grid.SetOrigin(-local_com[0], -local_com[1], -local_com[2])
grid.SetSpacing(1, 1, 1)
if vtk.vtkVersion().GetVTKMajorVersion() > 5:
grid.SetScalarType(vtk.VTK_UNSIGNED_CHAR, vtk.vtkInformation())
else:
grid.SetScalarType(vtk.VTK_UNSIGNED_CHAR)
if contour:
grid.SetExtent(0, grain_size[0] - 1, 0,
grain_size[1] - 1, 0, grain_size[2] - 1)
grid.GetPointData().SetScalars(vtk_data_array)
# contouring selected grain
contour = vtk.vtkContourFilter()
if vtk.vtkVersion().GetVTKMajorVersion() > 5:
contour.SetInputData(grid)
else:
contour.SetInput(grid)
contour.SetValue(0, 0.5)
contour.Update()
if verbose:
print(contour.GetOutput())
self.SetVtkMesh(contour.GetOutput())
else:
grid.SetExtent(0, grain_size[0], 0, grain_size[1], 0, grain_size[2])
grid.GetCellData().SetScalars(vtk_data_array)
# threshold selected grain
thresh = vtk.vtkThreshold()
thresh.ThresholdBetween(0.5, 1.5)
# thresh.ThresholdBetween(label-0.5, label+0.5)
if vtk.vtkVersion().GetVTKMajorVersion() > 5:
thresh.SetInputData(grid)
else:
thresh.SetInput(grid)
thresh.Update()
if verbose:
print('thresholding label: %d' % label)
print(thresh.GetOutput())
self.SetVtkMesh(thresh.GetOutput())
def vtk_file_name(self):
return 'grain_%d.vtu' % self.id
def save_vtk_repr(self, file_name=None):
import vtk
if not file_name:
file_name = self.vtk_file_name()
        print('writing ' + file_name)
writer = vtk.vtkXMLUnstructuredGridWriter()
writer.SetFileName(file_name)
if vtk.vtkVersion().GetVTKMajorVersion() > 5:
writer.SetInputData(self.vtkmesh)
else:
writer.SetInput(self.vtkmesh)
writer.Write()
def load_vtk_repr(self, file_name, verbose=False):
import vtk
if verbose:
print('reading ' + file_name)
reader = vtk.vtkXMLUnstructuredGridReader()
reader.SetFileName(file_name)
reader.Update()
self.vtkmesh = reader.GetOutput()
def orientation_matrix(self):
"""A method to access the grain orientation matrix.
:return: the grain 3x3 orientation matrix.
"""
return self.orientation.orientation_matrix()
def dct_omega_angles(self, hkl, lambda_keV, verbose=False):
"""Compute the two omega angles which satisfy the Bragg condition.
For a grain with a given crystal orientation sitting on a vertical
        rotation axis, there are exactly two omega positions in [0, 2pi] for
        which a particular hkl reflection will fulfil Bragg's law.
See :py:func:`~pymicro.crystal.microstructure.Orientation.dct_omega_angles`
of the :py:class:`~pymicro.crystal.microstructure.Orientation` class.
:param hkl: The given cristallographic :py:class:`~pymicro.crystal.lattice.HklPlane`
:param float lambda_keV: The X-rays energy expressed in keV
:param bool verbose: Verbose mode (False by default)
:return tuple: (w1, w2) the two values of the omega angle.
"""
return self.orientation.dct_omega_angles(hkl, lambda_keV, verbose)
@staticmethod
def from_dct(label=1, data_dir='.'):
"""Create a `Grain` instance from a DCT grain file.
:param int label: the grain id.
:param str data_dir: the data root from where to fetch data files.
:return: a new grain instance.
"""
grain_path = os.path.join(data_dir, '4_grains', 'phase_01', 'grain_%04d.mat' % label)
grain_info = h5py.File(grain_path)
g = Grain(label, Orientation.from_rodrigues(grain_info['R_vector'].value))
g.center = grain_info['center'].value
# add spatial representation of the grain if reconstruction is available
grain_map_path = os.path.join(data_dir, '5_reconstruction', 'phase_01_vol.mat')
if os.path.exists(grain_map_path):
with h5py.File(grain_map_path, 'r') as f:
                # because of how matlab writes the data, we need to swap X and Z axes in the DCT volume
vol = f['vol'].value.transpose(2, 1, 0)
from scipy import ndimage
grain_data = vol[ndimage.find_objects(vol == label)[0]]
g.volume = ndimage.measurements.sum(vol == label)
# create the vtk representation of the grain
g.add_vtk_mesh(grain_data, contour=False)
return g
class GrainData(tables.IsDescription):
"""
Description class specifying structured storage of grain data in
Microstructure Class, in HDF5 node /GrainData/GrainDataTable
"""
# grain identity number
idnumber = tables.Int32Col() # Signed 32-bit integer
# grain volume
volume = tables.Float32Col() # float
    # grain center of mass coordinates
    center = tables.Float32Col(shape=(3,))  # 32-bit float
    # Rodrigues vector defining grain orientation
    orientation = tables.Float32Col(shape=(3,))  # 32-bit float
    # Grain Bounding box
    bounding_box = tables.Int32Col(shape=(3, 2))  # Signed 32-bit integer
class Microstructure(SampleData):
"""
Class used to manipulate a full microstructure derived from the
`SampleData` class.
As SampleData, this class is a data container for a mechanical sample and
its microstructure, synchronized with a HDF5 file and a XML file
Microstructure implements a hdf5 data model specific to polycrystalline
sample data.
The dataset maintains a `GrainData` instance which inherits from
tables.IsDescription and acts as a structured array containing the grain
attributes such as id, orientations (in form of rodrigues vectors), volume
and bounding box.
A crystal `Lattice` is also associated to the microstructure and used in
all crystallography calculations.
"""
def __init__(self,
filename=None, name='micro', description='empty',
verbose=False, overwrite_hdf5=False, phase=None,
autodelete=False):
if filename is None:
# only add '_' if not present at the end of name
filename = name + (not name.endswith('_')) * '_' + 'data'
# prepare arguments for after file open
after_file_open_args = {'phase':phase}
# call SampleData constructor
SampleData.__init__(self, filename=filename, sample_name=name,
sample_description=description, verbose=verbose,
overwrite_hdf5=overwrite_hdf5,
autodelete=autodelete,
after_file_open_args=after_file_open_args)
return
def _after_file_open(self, phase=None, **kwargs):
"""Initialization code to run after opening a Sample Data file."""
self.grains = self.get_node('GrainDataTable')
if self._file_exist:
self.active_grain_map = self.get_attribute('active_grain_map',
'CellData')
if self.active_grain_map is None:
self.set_active_grain_map()
self._init_phase(phase)
if not hasattr(self, 'active_phase_id'):
self.active_phase_id = 1
else:
self.set_active_grain_map()
self._init_phase(phase)
self.active_phase_id = 1
return
def __repr__(self):
"""Provide a string representation of the class."""
s = '%s\n' % self.__class__.__name__
s += '* name: %s\n' % self.get_sample_name()
# TODO print phases here
s += '* lattice: %s\n' % self.get_lattice()
s += '\n'
# if self._verbose:
# for g in self.grains:
# s += '* %s' % g.__repr__
s += SampleData.__repr__(self)
return s
def minimal_data_model(self):
"""Data model for a polycrystalline microstructure.
Specify the minimal contents of the hdf5 (Group names, paths and group
types) in the form of a dictionary {content: location}. This extends
`~pymicro.core.SampleData.minimal_data_model` method.
:return: a tuple containing the two dictionnaries.
"""
minimal_content_index_dic = {'Image_data': '/CellData',
'grain_map': '/CellData/grain_map',
'phase_map': '/CellData/phase_map',
'mask': '/CellData/mask',
'Mesh_data': '/MeshData',
'Grain_data': '/GrainData',
'GrainDataTable': ('/GrainData/'
'GrainDataTable'),
'Phase_data': '/PhaseData'}
minimal_content_type_dic = {'Image_data': '3DImage',
'grain_map': 'field_array',
'phase_map': 'field_array',
'mask': 'field_array',
'Mesh_data': 'Mesh',
'Grain_data': 'Group',
'GrainDataTable': GrainData,
'Phase_data': 'Group'}
return minimal_content_index_dic, minimal_content_type_dic
def _init_phase(self, phase):
self._phases = []
if phase is None:
# create a default crystalline phase
phase = CrystallinePhase()
# if the h5 file does not exist yet, store the phase as metadata
if not self._file_exist: #FIXME is this useful?
self.add_phase(phase)
else:
self.sync_phases()
# if no phase is there, create one
if len(self.get_phase_ids_list()) == 0:
                print('no phase was found in this dataset, adding a default one')
self.add_phase(phase)
return
def sync_phases(self):
"""This method sync the _phases attribute with the content of the hdf5
file.
"""
self._phases = []
# loop on the phases present in the group /PhaseData
phase_group = self.get_node('/PhaseData')
for child in phase_group._v_children:
d = self.get_dic_from_attributes('/PhaseData/%s' % child)
#print(d)
phase = CrystallinePhase.from_dict(d)
self._phases.append(phase)
#print('%d phases found in the data set' % len(self._phases))
def set_phase(self, phase):
"""Set a phase for the given `phase_id`.
If the phase id does not correspond to one of the existing phase,
nothing is done.
        :param CrystallinePhase phase: the phase to use; its `phase_id`
            attribute determines which existing phase is overwritten.
"""
if phase.phase_id > self.get_number_of_phases():
            print('the phase_id given (%d) does not correspond to any existing '
                  'phase, the phase list has not been modified.' % phase.phase_id)
return
d = phase.to_dict()
print('setting phase %d with %s' % (phase.phase_id, phase.name))
self.add_attributes(d, '/PhaseData/phase_%02d' % phase.phase_id)
self.sync_phases()
def set_phases(self, phase_list):
"""Set a list of phases for this microstructure.
The different phases in the list are added in that order.
:param list phase_list: the list of phases to use.
"""
# delete all node in the phase_group
self.remove_node('/PhaseData', recursive=True)
self.add_group('PhaseData', location='/', indexname='Phase_data')
self.sync_phases()
# add each phase
for phase in phase_list:
self.add_phase(phase)
def get_number_of_phases(self):
"""Return the number of phases in this microstructure.
Each crystal phase is stored in a list attribute: `_phases`. Note that
it may be different (although it should not) from the different
phase ids in the phase_map array.
:return int: the number of phases in the microstructure.
"""
return len(self._phases)
def get_number_of_grains(self, from_grain_map=False):
"""Return the number of grains in this microstructure.
:return: the number of grains in the microstructure.
"""
if from_grain_map:
return len(np.unique(self.get_grain_map()))
else:
return self.grains.nrows
def add_phase(self, phase):
"""Add a new phase to this microstructure.
Before adding this phase, the phase id is set to the corresponding id.
:param CrystallinePhase phase: the phase to add.
"""
# this phase should have id self.get_number_of_phases() + 1
new_phase_id = self.get_number_of_phases() + 1
if not phase.phase_id == new_phase_id:
print('warning, adding phase with phase_id = %d (was %d)' %
(new_phase_id, phase.phase_id))
phase.phase_id = new_phase_id
self._phases.append(phase)
self.add_group('phase_%02d' % new_phase_id, location='/PhaseData',
indexname='phase_%02d' % new_phase_id, replace=True)
d = phase.to_dict()
self.add_attributes(d, '/PhaseData/phase_%02d' % new_phase_id)
print('new phase added: %s' % phase.name)
def get_phase_ids_list(self):
"""Return the list of the phase ids."""
return [phase.phase_id for phase in self._phases]
def get_phase(self, phase_id=None):
"""Get a crystalline phase.
If no phase_id is given, the active phase is returned.
:param int phase_id: the id of the phase to return.
:return: the `CrystallinePhase` corresponding to the id.
"""
if phase_id is None:
phase_id = self.active_phase_id
index = self.get_phase_ids_list().index(phase_id)
return self._phases[index]
def get_lattice(self, phase_id=None):
"""Get the crystallographic lattice associated with this microstructure.
If no phase_id is given, the `Lattice` of the active phase is returned.
:return: an instance of the `Lattice class`.
"""
return self.get_phase(phase_id).get_lattice()
def get_grain_map(self, as_numpy=True):
grain_map = self.get_field(self.active_grain_map)
if self._is_empty(self.active_grain_map):
grain_map = None
elif grain_map.ndim == 2:
# reshape to 3D
new_dim = self.get_attribute('dimension', 'CellData')
if len(new_dim) == 3:
grain_map = grain_map.reshape((new_dim))
else:
grain_map = grain_map.reshape((grain_map.shape[0],
grain_map.shape[1],
1))
return grain_map
def get_phase_map(self, as_numpy=True):
phase_map = self.get_field('phase_map')
if self._is_empty('phase_map'):
phase_map = None
elif phase_map.ndim == 2:
# reshape to 3D
new_dim = self.get_attribute('dimension', 'CellData')
if len(new_dim) == 3:
phase_map = phase_map.reshape((new_dim))
else:
phase_map = phase_map.reshape((phase_map.shape[0],
phase_map.shape[1],
1))
return phase_map
def get_mask(self, as_numpy=False):
mask = self.get_field('mask')
if self._is_empty('mask'):
mask = None
elif mask.ndim == 2:
# reshape to 3D
new_dim = self.get_attribute('dimension', 'CellData')
if len(new_dim) == 3:
mask = mask.reshape((new_dim))
else:
mask = mask.reshape((mask.shape[0],
mask.shape[1],
1))
return mask
def get_ids_from_grain_map(self):
"""Return the list of grain ids found in the grain map.
By convention, only positive values are taken into account, 0 is
reserved for the background and -1 for overlap regions.
:return: a 1D numpy array containing the grain ids.
"""
grain_map = self.get_node('grain_map')
grains_id = np.unique(grain_map)
grains_id = grains_id[grains_id > 0]
return grains_id
def get_grain_ids(self):
"""Return the grain ids found in the GrainDataTable.
:return: a 1D numpy array containing the grain ids.
"""
return self.get_tablecol('GrainDataTable', 'idnumber')
@staticmethod
def id_list_to_condition(id_list):
"""Convert a list of id to a condition to filter the grain table.
The condition will be interpreted using Numexpr typically using
a `read_where` call on the grain data table.
:param list id_list: a non empty list of the grain ids.
        :return: the condition as a string.
"""
if not len(id_list) > 0:
raise ValueError('the list of grain ids must not be empty')
condition = "\'(idnumber == %d)" % id_list[0]
for grain_id in id_list[1:]:
condition += " | (idnumber == %d)" % grain_id
condition += "\'"
return condition
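    # Example (comments only): for id_list = [1, 5, 7], the method returns the
    # string "'(idnumber == 1) | (idnumber == 5) | (idnumber == 7)'" which is
    # passed through eval() before being handed to read_where, as done in
    # get_grain_volumes below. `micro` stands for an existing Microstructure.
    #
    #   condition = Microstructure.id_list_to_condition([1, 5, 7])
    #   volumes = micro.grains.read_where(eval(condition))['volume']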
def get_grain_volumes(self, id_list=None):
"""Get the grain volumes.
The grain data table is queried and the volumes of the grains are
returned in a single array. An optional list of grain ids can be used
to restrict the grains, by default all the grain volumes are returned.
:param list id_list: a non empty list of the grain ids.
:return: a numpy array containing the grain volumes.
"""
if id_list:
condition = Microstructure.id_list_to_condition(id_list)
return self.grains.read_where(eval(condition))['volume']
else:
return self.get_tablecol('GrainDataTable', 'volume')
def get_grain_centers(self, id_list=None):
"""Get the grain centers.
The grain data table is queried and the centers of the grains are
returned in a single array. An optional list of grain ids can be used
to restrict the grains, by default all the grain centers are returned.
:param list id_list: a non empty list of the grain ids.
:return: a numpy array containing the grain centers.
"""
if id_list:
condition = Microstructure.id_list_to_condition(id_list)
return self.grains.read_where(eval(condition))['center']
else:
return self.get_tablecol('GrainDataTable', 'center')
def get_grain_rodrigues(self, id_list=None):
"""Get the grain rodrigues vectors.
The grain data table is queried and the rodrigues vectors of the grains
are returned in a single array. An optional list of grain ids can be
used to restrict the grains, by default all the grain rodrigues vectors
are returned.
:param list id_list: a non empty list of the grain ids.
:return: a numpy array containing the grain rodrigues vectors.
"""
if id_list:
condition = Microstructure.id_list_to_condition(id_list)
return self.grains.read_where(eval(condition))['orientation']
else:
return self.get_tablecol('GrainDataTable', 'orientation')
def get_grain_orientations(self, id_list=None):
"""Get a list of the grain orientations.
        The grain data table is queried to retrieve the rodrigues vectors.
An optional list of grain ids can be used to restrict the grains.
A list of `Orientation` instances is then created and returned.
:param list id_list: a non empty list of the grain ids.
:return: a list of the grain orientations.
"""
rods = self.get_grain_rodrigues(id_list)
orientations = [Orientation.from_rodrigues(rod) for rod in rods]
return orientations
def get_grain_bounding_boxes(self, id_list=None):
"""Get the grain bounding boxes.
The grain data table is queried and the bounding boxes of the grains
are returned in a single array. An optional list of grain ids can be
used to restrict the grains, by default all the grain bounding boxes
are returned.
:param list id_list: a non empty list of the grain ids.
:return: a numpy array containing the grain bounding boxes.
"""
if id_list:
condition = Microstructure.id_list_to_condition(id_list)
return self.grains.read_where(eval(condition))['bounding_box']
else:
return self.get_tablecol('GrainDataTable', 'bounding_box')
def get_voxel_size(self):
"""Get the voxel size for image data of the microstructure.
If this instance of `Microstructure` has no image data, None is returned.
"""
try:
return self.get_attribute(attrname='spacing',
nodename='/CellData')[0]
except:
return None
def get_grain(self, gid):
"""Get a particular grain given its id.
This method browses the microstructure and return the grain
corresponding to the given id. If the grain is not found, the
method raises a `ValueError`.
:param int gid: the grain id.
:return: The method return a new `Grain` instance with the corresponding id.
"""
try:
gr = self.grains.read_where('(idnumber == gid)')[0]
except:
raise ValueError('grain %d not found in the microstructure' % gid)
grain = Grain(gr['idnumber'],
Orientation.from_rodrigues(gr['orientation']))
grain.center = gr['center']
grain.volume = gr['volume']
return grain
def get_all_grains(self):
"""Build a list of `Grain` instances for all grains in this `Microstructure`.
:return: a list of the grains.
"""
grains_list = [self.get_grain(gid)
for gid in self.get_tablecol('GrainDataTable', 'idnumber')]
return grains_list
def get_grain_positions(self):
"""Return all the grain positions as a numpy array of shape (n, 3)
where n is the number of grains.
:return: a numpy array of shape (n, 3) of the grain positions.
"""
return self.grains[:]['center']
def get_grain_volume_fractions(self):
"""Compute all grains volume fractions.
:return: a 1D numpy array with all grain volume fractions.
"""
total_volume = np.sum(self.grains[:]['volume'])
return self.grains[:]['volume'] / total_volume
def get_grain_volume_fraction(self, gid, use_total_volume_value=None):
"""Compute the volume fraction of this grain.
:param int gid: the grain id.
:param float use_total_volume_value: the total volume value to use.
:return float: the grain volume fraction as a number in the range [0, 1].
"""
# compute the total volume
if use_total_volume_value:
total_volume = use_total_volume_value
else:
# sum all the grain volume to compute the total volume
total_volume = np.sum(self.get_grain_volumes())
volume_fraction = self.get_grain_volumes(id_list=[gid])[0] / total_volume
return volume_fraction
def set_orientations(self, orientations):
""" Store grain orientations array in GrainDataTable
orientation : (Ngrains, 3) array of rodrigues orientation vectors
"""
self.set_tablecol('GrainDataTable', 'orientation', column=orientations)
return
def set_centers(self, centers):
""" Store grain centers array in GrainDataTable
centers : (Ngrains, 3) array of grain centers of mass
"""
self.set_tablecol('GrainDataTable', 'center', column=centers)
return
def set_bounding_boxes(self, bounding_boxes):
""" Store grain bounding boxes array in GrainDataTable
"""
self.set_tablecol('GrainDataTable', 'bounding_box', column=bounding_boxes)
return
def set_volumes(self, volumes):
""" Store grain volumes array in GrainDataTable
"""
self.set_tablecol('GrainDataTable', 'volume', column=volumes)
return
def set_lattice(self, lattice, phase_id=None):
"""Set the crystallographic lattice associated with this microstructure.
If no `phase_id` is specified, the lattice will be set for the active
phase.
:param Lattice lattice: an instance of the `Lattice class`.
:param int phase_id: the id of the phase to set the lattice.
"""
if phase_id is None:
phase_id = self.active_phase_id
self.get_phase(phase_id)._lattice = lattice
def set_active_grain_map(self, map_name='grain_map'):
"""Set the active grain map name to inputed string.
The active_grain_map string is used as Name to get the grain_map field
in the dataset through the SampleData "get_field" method.
"""
self.active_grain_map = map_name
self.add_attributes({'active_grain_map':map_name}, 'CellData')
return
def set_grain_map(self, grain_map, voxel_size=None,
map_name='grain_map'):
"""Set the grain map for this microstructure.
:param ndarray grain_map: a 2D or 3D numpy array.
:param float voxel_size: the size of the voxels in mm unit. Used only
if the CellData image Node must be created.
"""
        # TODO: add compression_options
create_image = True
if self.__contains__('CellData'):
empty = self.get_attribute(attrname='empty', nodename='CellData')
if not empty:
create_image = False
if create_image:
if (voxel_size is None):
msg = 'Please specify voxel size for CellData image'
raise ValueError(msg)
if np.isscalar(voxel_size):
dim = len(grain_map.shape)
spacing_array = voxel_size*np.ones((dim,))
else:
if len(voxel_size) != len(grain_map.shape):
raise ValueError('voxel_size array must have a length '
'equal to grain_map shape')
spacing_array = voxel_size
self.add_image_from_field(field_array=grain_map,
fieldname=map_name,
imagename='CellData', location='/',
spacing=spacing_array,
replace=True)
else:
            # Handle case of a 2D Microstructure: squeeze grain map to
            # ensure (Nx,Ny,1) array will be stored as (Nx,Ny)
if self._get_group_type('CellData') == '2DImage':
grain_map = grain_map.squeeze()
self.add_field(gridname='CellData', fieldname=map_name,
array=grain_map, replace=True)
self.set_active_grain_map(map_name)
return
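    # Hedged usage sketch (comments only): store a synthetic 3D grain map with
    # a 1 micron voxel size. The array values are the grain ids and the
    # CellData image group is created if it does not exist yet. The dataset
    # name 'demo' is only illustrative.
    #
    #   import numpy as np
    #   micro = Microstructure(name='demo', overwrite_hdf5=True, autodelete=True)
    #   grain_map = np.zeros((64, 64, 64), dtype=np.int32)
    #   grain_map[:32] = 1
    #   grain_map[32:] = 2
    #   micro.set_grain_map(grain_map, voxel_size=0.001)  # voxel size in mm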
def set_phase_map(self, phase_map, voxel_size=None):
"""Set the phase map for this microstructure.
:param ndarray phase_map: a 2D or 3D numpy array.
:param float voxel_size: the size of the voxels in mm unit. Used only
if the CellData image Node must be created.
"""
# TODO: add compression_options
create_image = True
if self.__contains__('CellData'):
empty = self.get_attribute(attrname='empty', nodename='CellData')
if not empty:
create_image = False
if create_image:
if voxel_size is None:
msg = 'Please specify voxel size for CellData image'
raise ValueError(msg)
if np.isscalar(voxel_size):
dim = len(phase_map.shape)
spacing_array = voxel_size*np.ones((dim,))
else:
if len(voxel_size) != len(phase_map.shape):
raise ValueError('voxel_size array must have a length '
'equal to grain_map shape')
spacing_array = voxel_size
self.add_image_from_field(phase_map, 'phase_map',
imagename='CellData', location='/',
spacing=spacing_array,
replace=True)
else:
self.add_field(gridname='CellData', fieldname='phase_map',
array=phase_map, replace=True,
indexname='phase_map')
def set_mask(self, mask, voxel_size=None):
"""Set the mask for this microstructure.
:param ndarray mask: a 2D or 3D numpy array.
:param float voxel_size: the size of the voxels in mm unit. Used only
if the CellData image Node must be created.
"""
# TODO: add compression_options
create_image = True
if self.__contains__('CellData'):
empty = self.get_attribute(attrname='empty', nodename='CellData')
if not (empty):
create_image = False
if create_image:
if (voxel_size is None):
msg = 'Please specify voxel size for CellData image'
raise ValueError(msg)
if np.isscalar(voxel_size):
dim = len(mask.shape)
spacing_array = voxel_size*np.ones((dim,))
else:
if len(voxel_size) != len(mask.shape):
raise ValueError('voxel_size array must have a length '
'equal to grain_map shape')
spacing_array = voxel_size
self.add_image_from_field(mask, 'mask',
imagename='CellData', location='/',
spacing=spacing_array,
replace=True)
else:
self.add_field(gridname='CellData', fieldname='mask',
array=mask, replace=True, indexname='mask')
return
def set_random_orientations(self):
""" Set random orientations for all grains in GrainDataTable """
for grain in self.grains:
o = Orientation.random()
grain['orientation'] = o.rod
grain.update()
self.grains.flush()
return
def remove_grains_not_in_map(self):
"""Remove from GrainDataTable grains that are not in the grain map."""
_,not_in_map,_ = self.compute_grains_map_table_intersection()
self.remove_grains_from_table(not_in_map)
return
def remove_small_grains(self, min_volume=1.0, sync_table=False,
new_grain_map_name=None):
"""Remove from grain_map and grain data table small volume grains.
Removed grains in grain map will be replaced by background ID (0).
To be sure that the method acts consistently with the current grain
map, activate sync_table options.
:param float min_volume: Grains whose volume is under or equal to this
            value will be removed from the grain map and grain data table.
        :param bool sync_table: If `True`, synchronize grain data table with
            grain map before removing grains.
        :param str new_grain_map_name: If provided, store the new grain map
            with removed grains under this new name. If not, overwrite the
            current active grain map.
"""
if sync_table and not self._is_empty('grain_map'):
self.sync_grain_table_with_grain_map(sync_geometry=True)
condition = f"(volume <= {min_volume})"
id_list = self.grains.read_where(condition)['idnumber']
if not self._is_empty('grain_map'):
# Remove grains from grain map
grain_map = self.get_grain_map()
grain_map[np.where(np.isin(grain_map,id_list))] = 0
if new_grain_map_name is not None:
map_name = new_grain_map_name
else:
map_name = self.active_grain_map
self.set_grain_map(grain_map.squeeze(), map_name=map_name)
# Remove grains from table
self.remove_grains_from_table(id_list)
return
def remove_grains_from_table(self, ids):
"""Remove from GrainDataTable the grains with given ids.
:param ids: Array of grain ids to remove from GrainDataTable
:type ids: list
"""
for Id in ids:
where = self.grains.get_where_list('idnumber == Id')[:]
self.grains.remove_row(int(where))
return
def add_grains(self, euler_list, grain_ids=None):
"""A a list of grains to this microstructure.
This function adds a list of grains represented by a list of Euler
angles triplets, to the microstructure. If provided, the `grain_ids`
list will be used for the grain ids.
:param list euler_list: the list of euler angles (Bunge passive convention).
:param list grain_ids: an optional list for the ids of the new grains.
"""
grain = self.grains.row
# build a list of grain ids if it is not given
if grain_ids is None:
if self.get_number_of_grains() > 0:
min_id = max(self.get_grain_ids())
else:
min_id = 0
grain_ids = range(min_id, min_id + len(euler_list))
print('adding %d grains to the microstructure' % len(grain_ids))
for gid, euler in zip(grain_ids, euler_list):
grain['idnumber'] = gid
grain['orientation'] = Orientation.Euler2Rodrigues(euler)
grain.append()
self.grains.flush()
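    # Hedged usage sketch (comments only): add two grains from Euler angle
    # triplets (degrees, Bunge convention); ids are attributed automatically
    # when `grain_ids` is not given. The dataset name is only illustrative.
    #
    #   micro = Microstructure(name='demo_grains', overwrite_hdf5=True,
    #                          autodelete=True)
    #   euler_list = [[10., 20., 30.], [40., 50., 60.]]
    #   micro.add_grains(euler_list)
    #   print(micro.get_grain_ids())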
def add_grains_in_map(self):
"""Add to GrainDataTable the grains in grain map missing in table.
The grains are added with a random orientation by convention.
"""
_, _, not_in_table = self.compute_grains_map_table_intersection()
        # remove IDs <= 0 from the list (reserved for background)
not_in_table = np.delete(not_in_table, np.where(not_in_table <= 0))
        # generate random Euler angles
phi1 = np.random.rand(len(not_in_table),1) * 360.
Phi = 180. * np.arccos(2 * np.random.rand(len(not_in_table),1)- 1) / np.pi
phi2 = np.random.rand(len(not_in_table),1) * 360.
euler_list = np.concatenate((phi1,Phi,phi2), axis=1)
self.add_grains(euler_list, grain_ids=not_in_table)
return
@staticmethod
def random_texture(n=100):
"""Generate a random texture microstructure.
**parameters:**
*n* The number of grain orientations in the microstructure.
"""
m = Microstructure(name='random_texture', overwrite_hdf5=True)
grain = m.grains.row
for i in range(n):
grain['idnumber'] = i + 1
o = Orientation.random()
grain['orientation'] = o.rod
grain.append()
m.grains.flush()
return m
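    # Hedged usage sketch (comments only): create a synthetic microstructure
    # with 20 random orientations; note that overwrite_hdf5=True means an
    # existing file with the same name is replaced on disk.
    #
    #   m = Microstructure.random_texture(n=20)
    #   print(m.get_number_of_grains())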
def set_mesh(self, mesh_object=None, file=None, meshname='micro_mesh'):
"""Add a mesh of the microstructure to the dataset.
        The mesh can be passed either as a BasicTools mesh object or as a mesh
        file. The only file format handled for now is .geof (Zset software fmt).
"""
self.add_mesh(mesh_object=mesh_object, meshname=meshname,
location='/MeshData', replace=True, file=file)
return
def create_grain_ids_field(self, meshname=None, store=True):
"""Create a grain Id field of grain orientations on the input mesh.
Creates a element wise field from the microsctructure mesh provided,
adding to each element the value of the Rodrigues vector
of the local grain element set, as it is and if it is referenced in the
`GrainDataTable` node.
:param str mesh: Name, Path or index name of the mesh on which an
orientation map element field must be constructed
:param bool store: If `True`, store the orientation map in `CellData`
image group, with name `orientation_map`
"""
# TODO : adapt data model to include microstructure mesh and adapt
        # routine arguments and code to the new data model
# input a mesh_object or a mesh group ?
# check if mesh is provided
if meshname is None:
            raise ValueError('meshname does not refer to an existing mesh')
if not(self._is_mesh(meshname)) or self._is_empty(meshname):
            raise ValueError('meshname does not refer to a non empty mesh '
                             'group')
# create empty element vector field
Nelements = int(self.get_attribute('Number_of_elements',meshname))
mesh = self.get_node(meshname)
El_tag_path = os.path.join(mesh._v_pathname,'Geometry','ElementsTags')
ID_field = np.zeros((Nelements,1),dtype=float)
grainIds = self.get_grain_ids()
# if mesh is provided
for i in range(len(grainIds)):
set_name = 'ET_grain_'+str(grainIds[i]).strip()
elset_path = os.path.join(El_tag_path, set_name)
element_ids = self.get_node(elset_path, as_numpy=True)
ID_field[element_ids] = grainIds[i]
if store:
self.add_field(gridname=meshname, fieldname='grain_ids',
array=ID_field)
return ID_field
def create_orientation_field(self, meshname=None, store=True):
"""Create a vector field of grain orientations on the inputed mesh.
Creates a element wise field from the microsctructure mesh provided,
adding to each element the value of the Rodrigues vector
of the local grain element set, as it is and if it is referenced in the
`GrainDataTable` node.
:param str mesh: Name, Path or index name of the mesh on which an
orientation map element field must be constructed
:param bool store: If `True`, store the orientation map in `CellData`
image group, with name `orientation_map`
"""
# TODO : adapt data model to include microstructure mesh and adapt
        # routine arguments and code to the new data model
# input a mesh_object or a mesh group ?
# check if mesh is provided
if meshname is None:
            raise ValueError('meshname does not refer to an existing mesh')
if not(self._is_mesh(meshname)) or self._is_empty(meshname):
            raise ValueError('meshname does not refer to a non empty mesh '
                             'group')
# create empty element vector field
Nelements = int(self.get_attribute('Number_of_elements',meshname))
mesh = self.get_node(meshname)
El_tag_path = os.path.join(mesh._v_pathname,'Geometry','ElementsTags')
orientation_field = np.zeros((Nelements,3),dtype=float)
grainIds = self.get_grain_ids()
grain_orientations = self.get_grain_rodrigues()
# if mesh is provided
for i in range(len(grainIds)):
set_name = 'ET_grain_'+str(grainIds[i]).strip()
elset_path = os.path.join(El_tag_path, set_name)
element_ids = self.get_node(elset_path, as_numpy=True)
orientation_field[element_ids,:] = grain_orientations[i,:]
if store:
self.add_field(gridname=meshname, fieldname='grain_orientations',
array=orientation_field)
return orientation_field
def create_orientation_map(self, store=True):
"""Create a vector field in CellData of grain orientations.
        Creates a (Nx, Ny, Nz, 3) or (Nx, Ny, 3) field from the microstructure
`grain_map`, adding to each voxel the value of the Rodrigues vector
of the local grain Id, as it is and if it is referenced in the
`GrainDataTable` node.
:param bool store: If `True`, store the orientation map in `CellData`
image group, with name `orientation_map`
"""
# safety check
if self._is_empty('grain_map'):
            raise RuntimeError('The microstructure instance has an empty '
                               '`grain_map` node. Cannot create orientation'
                               ' map')
grain_map = self.get_grain_map().squeeze()
grainIds = self.get_grain_ids()
grain_orientations = self.get_grain_rodrigues()
# safety check 2
grain_list = np.unique(grain_map)
# remove -1 and 0 from the list of grains in grain map (Ids reserved
# for background and overlaps in non-dilated reconstructed grain maps)
grain_list = np.delete(grain_list, np.isin(grain_list, [-1, 0]))
if not np.all(np.isin(grain_list, grainIds)):
raise ValueError('Some grain Ids in `grain_map` are not referenced'
' in the `GrainDataTable` array. Cannot create'
' orientation map.')
# create empty orientation map with right dimensions
im_dim = self.get_attribute('dimension', 'CellData')
shape_orientation_map = np.empty(shape=(len(im_dim)+1), dtype='int32')
shape_orientation_map[:-1] = im_dim
shape_orientation_map[-1] = 3
orientation_map = np.zeros(shape=shape_orientation_map, dtype=float)
omap_X = orientation_map[..., 0]
omap_Y = orientation_map[..., 1]
omap_Z = orientation_map[..., 2]
for i in range(len(grainIds)):
slc = np.where(grain_map == grainIds[i])
omap_X[slc] = grain_orientations[i, 0]
omap_Y[slc] = grain_orientations[i, 1]
omap_Z[slc] = grain_orientations[i, 2]
if store:
self.add_field(gridname='CellData', fieldname='orientation_map',
array=orientation_map, replace=True,
location='CellData')
return orientation_map
def add_IPF_maps(self):
"""Add IPF maps to the data set.
IPF colors are computed for the 3 cartesian directions and stored into
the h5 file in the `CellData` image group, with names `ipf_map_100`,
`ipf_map_010`, `ipf_map_001`.
"""
ipf100 = self.create_IPF_map(axis=np.array([1., 0., 0.]))
self.add_field(gridname='CellData', fieldname='ipf_map_100',
array=ipf100, replace=True)
ipf010 = self.create_IPF_map(axis=np.array([0., 1., 0.]))
self.add_field(gridname='CellData', fieldname='ipf_map_010',
array=ipf010, replace=True)
ipf001 = self.create_IPF_map(axis=np.array([0., 0., 1.]))
self.add_field(gridname='CellData', fieldname='ipf_map_001',
array=ipf001, replace=True)
del ipf100, ipf010, ipf001
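    # Hedged usage sketch (comments only): compute and store the three IPF maps
    # in the CellData image group of an existing Microstructure `micro` (this
    # requires a non empty grain map and orientations in the GrainDataTable).
    #
    #   micro.add_IPF_maps()
    #   ipf_z = micro.get_field('ipf_map_001')  # per-voxel RGB colors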
def create_IPF_map(self, axis=np.array([0., 0., 1.])):
"""Create a vector field in CellData to store the IPF colors.
Creates a (Nx, Ny, Nz, 3) field with the IPF color for each voxel.
Note that this function assumes a single orientation per grain.
:param axis: the unit vector for the load direction to compute IPF
colors.
"""
dims = list(self.get_grain_map().shape)
grain_ids = self.get_grain_ids()
shape_ipf_map = list(dims) + [3]
ipf_map = np.zeros(shape=shape_ipf_map, dtype=float)
for i in range(len(grain_ids)):
gid = grain_ids[i]
# use the bounding box for this grain
bb = self.grains.read_where('idnumber == %d' % gid)['bounding_box'][0]
grain_map = self.get_grain_map()[bb[0][0]:bb[0][1],
bb[1][0]:bb[1][1],
bb[2][0]:bb[2][1]]
o = self.get_grain(gid).orientation
ipf_map[bb[0][0]:bb[0][1],
bb[1][0]:bb[1][1],
bb[2][0]:bb[2][1]][grain_map == gid] = o.ipf_color(axis, symmetry=self.get_lattice(
).get_symmetry(), saturate=True)
progress = 100 * (1 + i) / len(grain_ids)
print('computing IPF map: {0:.2f} %'.format(progress), end='\r')
return ipf_map.squeeze()
def view_slice(self, slice=None, color='random', show_mask=True,
show_grain_ids=False, highlight_ids=None, slip_system=None,
axis=[0., 0., 1], show_slip_traces=False, hkl_planes=None,
display=True):
"""A simple utility method to show one microstructure slice.
The microstructure can be colored using different fields such as a
random color (default), the grain ids, the Schmid factor or the grain
orientation using IPF coloring.
The plot can be customized in several ways. Annotations can be added
in the grains (ids, lattice plane traces) and the list of grains where
        annotations are shown can be controlled using the `highlight_ids`
argument. By default, if present, the mask will be shown.
:param int slice: the slice number
:param str color: a string to chose the colormap from ('random',
'grain_ids', 'schmid', 'ipf')
:param bool show_mask: a flag to show the mask by transparency.
:param bool show_grain_ids: a flag to annotate the plot with the grain
ids.
:param list highlight_ids: a list of grain ids to restrict the
annotations (by default all grains are annotated).
:param slip_system: an instance (or a list of instances) of the class
SlipSystem to compute the Schmid factor.
:param axis: the unit vector for the load direction to compute
the Schmid factor or to display IPF coloring.
:param bool show_slip_traces: activate slip traces plot in each grain.
:param list hkl_planes: the list of planes to plot the slip traces.
:param bool display: if True, the show method is called, otherwise,
the figure is simply returned.
"""
if self._is_empty('grain_map'):
            print('Microstructure instance must have a grain_map field to use '
                  'this method')
return
grain_map = self.get_grain_map(as_numpy=True)
if slice is None or slice > grain_map.shape[2] - 1 or slice < 0:
slice = grain_map.shape[2] // 2
print('using slice value %d' % slice)
grains_slice = grain_map[:, :, slice]
gids = np.intersect1d(np.unique(grains_slice), self.get_grain_ids())
fig, ax = plt.subplots()
if color == 'random':
grain_cmap = Microstructure.rand_cmap(first_is_black=True)
ax.imshow(grains_slice.T, cmap=grain_cmap, vmin=0, interpolation='nearest')
elif color == 'grain_ids':
ax.imshow(grains_slice.T, cmap='viridis', vmin=0, interpolation='nearest')
elif color == 'schmid':
# construct a Schmid factor image
schmid_image = np.zeros_like(grains_slice, dtype=float)
for gid in gids:
o = self.get_grain(gid).orientation
if type(slip_system) == list:
# compute max Schmid factor
sf = max(o.compute_all_schmid_factors(
slip_systems=slip_system, load_direction=axis))
else:
sf = o.schmid_factor(slip_system, axis)
schmid_image[grains_slice == gid] = sf
from matplotlib import cm
plt.imshow(schmid_image.T, cmap=cm.gray, vmin=0, vmax=0.5, interpolation='nearest')
cb = plt.colorbar()
cb.set_label('Schmid factor')
elif color == 'ipf':
# build IPF image
ipf_image = np.zeros((*grains_slice.shape, 3), dtype=float)
for gid in gids:
o = self.get_grain(gid).orientation
try:
c = o.ipf_color(axis, symmetry=self.get_lattice().get_symmetry(), saturate=True)
except ValueError:
print('problem moving to the fundamental zone for rodrigues vector {}'.format(o.rod))
c = np.array([0., 0., 0.])
ipf_image[grains_slice == gid] = c
plt.imshow(ipf_image.transpose(1, 0, 2), interpolation='nearest')
else:
            print('unknown color scheme requested, please choose between '
                  '{random, grain_ids, schmid, ipf}, returning')
return
ax.xaxis.set_label_position('top')
plt.xlabel('X')
plt.ylabel('Y')
if not self._is_empty('mask') and show_mask:
from pymicro.view.vol_utils import alpha_cmap
mask = self.get_mask()
plt.imshow(mask[:, :, slice].T, cmap=alpha_cmap(opacity=0.3))
# compute the center of mass of each grain of interest in this slice
if show_grain_ids or show_slip_traces:
centers = np.zeros((len(gids), 2))
sizes = np.zeros(len(gids))
for i, gid in enumerate(gids):
if highlight_ids and gid not in highlight_ids:
continue
sizes[i] = np.sum(grains_slice == gid)
centers[i] = ndimage.measurements.center_of_mass(
grains_slice == gid, grains_slice)
# grain id labels
if show_grain_ids:
for i, gid in enumerate(gids):
if highlight_ids and gid not in highlight_ids:
continue
plt.annotate('%d' % gids[i], xycoords='data',
xy=(centers[i, 0], centers[i, 1]),
horizontalalignment='center',
verticalalignment='center',
color='k',
fontsize=12)
# slip traces on this slice for a set of lattice planes
if show_slip_traces and hkl_planes:
for i, gid in enumerate(gids):
if highlight_ids and gid not in highlight_ids:
continue
g = self.get_grain(gid)
for hkl in hkl_planes:
trace = hkl.slip_trace(g.orientation,
n_int=[0, 0, 1],
view_up=[0, -1, 0],
trace_size=0.8 * np.sqrt(sizes[i]),
verbose=False)
color = 'k'
x = centers[i][0] + np.array([-trace[0] / 2, trace[0] / 2])
y = centers[i][1] + np.array([-trace[1] / 2, trace[1] / 2])
#plt.plot(centers[i][0], centers[i][1], 'o', color=color)
plt.plot(x, y, '-', linewidth=1, color=color)
extent = (-self.get_voxel_size() * grains_slice.shape[0] / 2,
self.get_voxel_size() * grains_slice.shape[0] / 2,
self.get_voxel_size() * grains_slice.shape[1] / 2,
-self.get_voxel_size() * grains_slice.shape[1] / 2)
plt.axis([0, grains_slice.shape[0], grains_slice.shape[1], 0])
if display:
plt.show()
return fig, ax
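    # Hedged usage sketch (comments only): display the middle slice of an
    # existing Microstructure `micro`, colored by IPF along Z, with grain ids
    # annotated.
    #
    #   fig, ax = micro.view_slice(color='ipf', axis=[0., 0., 1.],
    #                              show_grain_ids=True)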
@staticmethod
def rand_cmap(n=4096, first_is_black=False, seed=13):
"""Creates a random color map to color the grains.
The first color can be enforced to black and usually figure out the
background. The random seed is fixed to consistently produce the same
colormap.
:param int n: the number of colors in the list.
:param bool first_is_black: set black as the first color of the list.
:param int seed: the random seed.
:return: a matplotlib colormap.
"""
np.random.seed(seed)
rand_colors = np.random.rand(n, 3)
if first_is_black:
rand_colors[0] = [0., 0., 0.] # enforce black background (value 0)
return colors.ListedColormap(rand_colors)
def ipf_cmap(self):
"""
Return a colormap with ipf colors.
.. warning::
This function works only for a microstructure with the cubic symmetry
due to current limitation in the `Orientation` get_ipf_colour method.
:return: a color map that can be directly used in pyplot.
"""
ipf_colors = np.zeros((4096, 3))
for grain in self.grains:
o = Orientation.from_rodrigues(grain['orientation'])
ipf_colors[grain['idnumber'], :] = o.get_ipf_colour()
return colors.ListedColormap(ipf_colors)
@staticmethod
def from_grain_file(grain_file_path, col_id=0, col_phi1=1, col_phi=2,
col_phi2=3, col_x=4, col_y=5, col_z=None,
col_volume=None, autodelete=True):
"""Create a `Microstructure` reading grain infos from a file.
        This file is typically created using EBSD. The usual pattern is:
        grain_id, phi1, phi, phi2, x, y, volume. The column numbers are
        tunable using the function arguments.
"""
# get the file name without extension
name = os.path.splitext(os.path.basename(grain_file_path))[0]
print('creating microstructure %s' % name)
micro = Microstructure(name=name, overwrite_hdf5=True,
autodelete=autodelete)
grain = micro.grains.row
# read grain infos from the grain file
grains_EBSD = np.genfromtxt(grain_file_path)
for i in range(len(grains_EBSD)):
o = Orientation.from_euler([grains_EBSD[i, col_phi1],
grains_EBSD[i, col_phi],
grains_EBSD[i, col_phi2]])
grain['idnumber'] = int(grains_EBSD[i, col_id])
grain['orientation'] = o.rod
z = grains_EBSD[i, col_z] if col_z else 0.
grain['center'] = np.array([grains_EBSD[i, col_x],
grains_EBSD[i, col_y], z])
if col_volume:
grain['volume'] = grains_EBSD[i, col_volume]
grain.append()
micro.grains.flush()
return micro
def print_grains_info(self, grain_list=None, as_string=False):
""" Print informations on the grains in the microstructure"""
s = ''
if grain_list is None:
grain_list = self.get_tablecol(tablename='GrainDataTable',
colname='idnumber')
for row in self.grains:
if row['idnumber'] in grain_list:
o = Orientation.from_rodrigues(row['orientation'])
s = 'Grain %d\n' % (row['idnumber'])
s += ' * %s\n' % (o)
s += ' * center %s\n' % np.array_str(row['center'])
s += ' * volume %f\n' % (row['volume'])
if not (as_string):
print(s)
return s
@staticmethod
def match_grains(micro1, micro2, use_grain_ids=None, verbose=False):
return micro1.match_grains(micro2, use_grain_ids=use_grain_ids,
verbose=verbose)
def match_grains(self, micro2, mis_tol=1, use_grain_ids=None, verbose=False):
"""Match grains from a second microstructure to this microstructure.
        This function tries to find pairs of grains based on their orientations.
.. warning::
This function works only for microstructures with the same symmetry.
:param micro2: the second instance of `Microstructure` from which
to match the grains.
        :param float mis_tol: the tolerance in misorientation used
            to detect matches (in degrees).
:param list use_grain_ids: a list of ids to restrict the grains
in which to search for matches.
:param bool verbose: activate verbose mode.
:raise ValueError: if the microstructures do not have the same symmetry.
:return tuple: a tuple of three lists holding respectively the matches,
the candidates for each match and the grains that were unmatched.
"""
# TODO : Test
if not (self.get_lattice().get_symmetry()
== micro2.get_lattice().get_symmetry()):
raise ValueError('warning, microstructure should have the same '
'symmetry, got: {} and {}'.format(
self.get_lattice().get_symmetry(),
micro2.get_lattice().get_symmetry()))
candidates = []
matched = []
        unmatched = []  # grains that were not matched within the given tolerance
# restrict the grain ids to match if needed
sym = self.get_lattice().get_symmetry()
if use_grain_ids is None:
grains_to_match = self.get_tablecol(tablename='GrainDataTable',
colname='idnumber')
else:
grains_to_match = use_grain_ids
# look at each grain
for i, g1 in enumerate(self.grains):
if not (g1['idnumber'] in grains_to_match):
continue
cands_for_g1 = []
best_mis = mis_tol
best_match = -1
o1 = Orientation.from_rodrigues(g1['orientation'])
for g2 in micro2.grains:
o2 = Orientation.from_rodrigues(g2['orientation'])
# compute disorientation
mis, _, _ = o1.disorientation(o2, crystal_structure=sym)
misd = np.degrees(mis)
if misd < mis_tol:
if verbose:
print('grain %3d -- candidate: %3d, misorientation:'
' %.2f deg' % (g1['idnumber'], g2['idnumber'],
misd))
# add this grain to the list of candidates
cands_for_g1.append(g2['idnumber'])
if misd < best_mis:
best_mis = misd
best_match = g2['idnumber']
# add our best match or mark this grain as unmatched
if best_match > 0:
matched.append([g1['idnumber'], best_match])
else:
unmatched.append(g1['idnumber'])
candidates.append(cands_for_g1)
if verbose:
print('done with matching')
print('%d/%d grains were matched ' % (len(matched),
len(grains_to_match)))
return matched, candidates, unmatched
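    # Hedged usage sketch (comments only): match the grains of two datasets of
    # the same sample (`micro1` and `micro2` stand for existing Microstructure
    # instances with the same symmetry) using a 2 degree tolerance.
    #
    #   matched, candidates, unmatched = micro1.match_grains(micro2, mis_tol=2.)
    #   print('%d matched, %d unmatched' % (len(matched), len(unmatched)))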
def match_orientation(self, orientation, use_grain_ids=None):
"""Find the best match between an orientation and the grains from this
microstructure.
:param orientation: an instance of `Orientation` to match the grains.
:param list use_grain_ids: a list of ids to restrict the grains
in which to search for matches.
:return tuple: the grain id of the best match and the misorientation.
"""
sym = self.get_lattice().get_symmetry()
if use_grain_ids is None:
grains_to_match = self.get_tablecol(tablename='GrainDataTable',
colname='idnumber')
else:
grains_to_match = use_grain_ids
best_mis = 180.
for g in self.grains:
if not (g['idnumber'] in grains_to_match):
continue
o = Orientation.from_rodrigues(g['orientation'])
# compute disorientation
mis, _, _ = o.disorientation(orientation, crystal_structure=sym)
mis_deg = np.degrees(mis)
if mis_deg < best_mis:
best_mis = mis_deg
best_match = g['idnumber']
return best_match, best_mis
def find_neighbors(self, grain_id, distance=1):
"""Find the neighbor ids of a given grain.
This function find the ids of the neighboring grains. A mask is
constructed by dilating the grain to encompass the immediate
neighborhood of the grain. The ids can then be determined using numpy
unique function.
:param int grain_id: the grain id from which the neighbors need
to be determined.
:param int distance: the distance to use for the dilation (default
is 1 voxel).
:return: a list (possibly empty) of the neighboring grain ids.
"""
# get the bounding box around the grain
bb = self.grains.read_where('idnumber == %d' % grain_id)['bounding_box'][0]
grain_map = self.get_grain_map()[bb[0][0]:bb[0][1],
bb[1][0]:bb[1][1],
bb[2][0]:bb[2][1]]
if grain_map is None:
return []
grain_data = (grain_map == grain_id)
grain_data_dil = ndimage.binary_dilation(grain_data,
iterations=distance).astype(
np.uint8)
neighbor_ids = np.unique(grain_map[grain_data_dil - grain_data == 1])
return neighbor_ids.tolist()
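    # Hedged usage sketch (comments only): list the grains touching grain 5 of
    # an existing Microstructure `micro`, using a 2 voxel dilation distance.
    #
    #   neighbors = micro.find_neighbors(5, distance=2)
    #   print('grain 5 neighbors: %s' % neighbors)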
def dilate_grain(self, grain_id, dilation_steps=1, use_mask=False):
"""Dilate a single grain overwriting the neighbors.
:param int grain_id: the grain id to dilate.
:param int dilation_steps: the number of dilation steps to apply.
:param bool use_mask: if True and that this microstructure has a mask,
the dilation will be limited by it.
"""
grain_map = self.get_grain_map(as_numpy=True)
grain_volume_init = (grain_map == grain_id).sum()
grain_data = grain_map == grain_id
grain_data = ndimage.binary_dilation(grain_data,
iterations=dilation_steps).astype(np.uint8)
if use_mask and not self._is_empty('mask'):
grain_data *= self.get_mask(as_numpy=True)
grain_map[grain_data == 1] = grain_id
grain_volume_final = (grain_map == grain_id).sum()
print('grain %s was dilated by %d voxels' % (grain_id,
grain_volume_final - grain_volume_init))
self.set_grain_map(grain_map, self.get_voxel_size(),
map_name=self.active_grain_map)
self.sync()
@staticmethod
def dilate_labels(array, dilation_steps=1, mask=None, dilation_ids=None,
struct=None):
"""Dilate labels isotropically to fill the gap between them.
This code is based on the gtDilateGrains function from the DCT code.
It has been extended to handle both 2D and 3D cases.
:param ndarray array: the numpy array to dilate.
:param int dilation_steps: the number of dilation steps to apply.
        :param ndarray mask: a mask to constrain the dilation (None by default).
:param list dilation_ids: a list to restrict the dilation to the given
ids.
:param ndarray struct: the structuring element to use (strong
connectivity by default).
:return: the dilated array.
"""
from scipy import ndimage
if struct is None:
struct = ndimage.morphology.generate_binary_structure(array.ndim, 1)
assert struct.ndim == array.ndim
# carry out dilation in iterative steps
step = 0
while True:
if dilation_ids:
grains = np.isin(array, dilation_ids)
else:
grains = (array > 0).astype(np.uint8)
grains_dil = ndimage.morphology.binary_dilation(grains,
structure=struct).astype(np.uint8)
if mask is not None:
# only dilate within the mask
grains_dil *= mask.astype(np.uint8)
todo = (grains_dil - grains)
# get the list of voxel for this dilation step
if array.ndim == 2:
X, Y = np.where(todo)
else:
X, Y, Z = np.where(todo)
xstart = X - 1
xend = X + 1
ystart = Y - 1
yend = Y + 1
# check bounds
xstart[xstart < 0] = 0
ystart[ystart < 0] = 0
xend[xend > array.shape[0] - 1] = array.shape[0] - 1
yend[yend > array.shape[1] - 1] = array.shape[1] - 1
if array.ndim == 3:
zstart = Z - 1
zend = Z + 1
zstart[zstart < 0] = 0
zend[zend > array.shape[2] - 1] = array.shape[2] - 1
dilation = np.zeros_like(X).astype(np.int16)
print('%d voxels to replace' % len(X))
for i in range(len(X)):
if array.ndim == 2:
neighbours = array[xstart[i]:xend[i] + 1,
ystart[i]:yend[i] + 1]
else:
neighbours = array[xstart[i]:xend[i] + 1,
ystart[i]:yend[i] + 1,
zstart[i]:zend[i] + 1]
if np.any(neighbours):
# at least one neighboring voxel is non zero
counts = np.bincount(neighbours.flatten())[1:] # do not consider zero
# find the most frequent value
dilation[i] = np.argmax(counts) + 1
#dilation[i] = min(neighbours[neighbours > 0])
if array.ndim == 2:
array[X, Y] = dilation
else:
array[X, Y, Z] = dilation
print('dilation step %d done' % (step + 1))
step = step + 1
if step == dilation_steps:
break
if dilation_steps == -1:
if not np.any(array == 0):
break
return array
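    # Hedged usage sketch (comments only): fill the zero-valued gaps of a small
    # labeled 2D array with one dilation step, restricted by a mask.
    #
    #   import numpy as np
    #   labels = np.zeros((10, 10), dtype=np.int16)
    #   labels[2:5, 2:5] = 1
    #   labels[6:9, 6:9] = 2
    #   mask = np.ones_like(labels, dtype=np.uint8)
    #   dilated = Microstructure.dilate_labels(labels, dilation_steps=1,
    #                                          mask=mask)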
def dilate_grains(self, dilation_steps=1, dilation_ids=None,
new_map_name='dilated_grain_map'):
"""Dilate grains to fill the gap between them.
This function calls `dilate_labels` with the grain map of the
microstructure.
:param int dilation_steps: the number of dilation steps to apply.
:param list dilation_ids: a list to restrict the dilation to the given ids.
"""
if not self.__contains__('grain_map'):
raise ValueError('microstructure %s must have an associated '
'grain_map ' % self.get_sample_name())
grain_map = self.get_grain_map(as_numpy=True).copy()
# get rid of overlap regions flaged by -1
grain_map[grain_map == -1] = 0
if not self._is_empty('mask'):
grain_map = Microstructure.dilate_labels(grain_map,
dilation_steps=dilation_steps,
mask=self.get_mask(as_numpy=True),
dilation_ids=dilation_ids)
else:
grain_map = Microstructure.dilate_labels(grain_map,
dilation_steps=dilation_steps,
dilation_ids=dilation_ids)
# finally assign the dilated grain map to the microstructure
self.set_grain_map(grain_map, map_name=new_map_name)
def clean_grain_map(self, new_map_name='grain_map_clean'):
"""Apply a morphological cleaning treatment to the active grain map.
A Matlab morphological cleaner is called to smooth the morphology of
the different IDs in the grain map.
This cleaning treatment is typically used to improve the quality of a
        mesh produced from the grain_map, or to improve the results of image
        based mechanical modeling techniques, such as FFT-based
        computational homogenization of the polycrystalline microstructure.
        .. warning::
This method relies on the code of the 'core.utils' and on Matlab
code developed by <NAME> at the 'Centre des Matériaux, Mines
Paris'. These tools and codes must be installed and referenced
in the PATH of your workstation for this method to work. For
more details, see the 'utils' package.
"""
from pymicro.core.utils.SDZsetUtils.SDmeshers import SDImageMesher
Mesher = SDImageMesher(data=self)
Mesher.morphological_image_cleaner(
target_image_field=self.active_grain_map,
clean_fieldname=new_map_name, replace=True)
del Mesher
self.set_active_grain_map(new_map_name)
return
def mesh_grain_map(self, mesher_opts=dict(), print_output=False):
""" Create a 2D or 3D conformal mesh from the grain map.
A Matlab multiphase_image mesher is called to create a conformal mesh
of the grain map that is stored as a SampleData Mesh group in the
MeshData Group of the Microstructure dataset. The mesh data will
contain an element set per grain in the grain map.
        .. warning::
This method relies on the code of the 'core.utils', on Matlab
code developed by <NAME> at the 'Centre des Matériaux, Mines
Paris', on the Zset software and the Mesh GMS software.
These tools and codes must be installed and referenced
in the PATH of your workstation for this method to work. For
more details, see the 'utils' package.
"""
from pymicro.core.utils.SDZsetUtils.SDmeshers import SDImageMesher
Mesher = SDImageMesher(data=self)
Mesher.multi_phase_mesher(
multiphase_image_name=self.active_grain_map,
meshname='MeshData', location='/', replace=True,
bin_fields_from_sets=False, mesher_opts=mesher_opts,
elset_id_field=True, print_output=print_output)
del Mesher
return
def crop(self, x_start=None, x_end=None, y_start=None, y_end=None,
z_start=None, z_end=None, crop_name=None, autodelete=False,
recompute_geometry=True, verbose=False):
"""Crop the microstructure to create a new one.
This method crops the CellData image group to a new microstructure,
and adapts the GrainDataTable to the crop.
:param int x_start: start value for slicing the first axis.
:param int x_end: end value for slicing the first axis.
:param int y_start: start value for slicing the second axis.
:param int y_end: end value for slicing the second axis.
:param int z_start: start value for slicing the third axis.
:param int z_end: end value for slicing the third axis.
        :param str crop_name: the name for the cropped microstructure
(the default is to append '_crop' to the initial name).
:param bool autodelete: a flag to delete the microstructure files
on the disk when it is not needed anymore.
:param bool recompute_geometry: If `True` (defaults), recompute the
            grain centers, volumes, and bounding boxes in the cropped microstructure.
            Use `False` when using a crop that does not cut grains, for instance
when cropping a microstructure within the mask, to avoid the heavy
computational cost of the grain geometry data update.
:return: a new `Microstructure` instance with the cropped grain map.
"""
# TODO: add phase transfer to new microstructure
if self._is_empty('grain_map'):
print('warning: needs a grain map to crop the microstructure')
return
# input default values for bounds if not specified
if not x_start:
x_start = 0
if not y_start:
y_start = 0
if not z_start:
z_start = 0
if not x_end:
x_end = self.get_grain_map().shape[0]
if not y_end:
y_end = self.get_grain_map().shape[1]
if not z_end:
z_end = self.get_grain_map().shape[2]
if not crop_name:
crop_name = self.get_sample_name() + \
(not self.get_sample_name().endswith('_')) * '_' + 'crop'
# create new microstructure dataset
micro_crop = Microstructure(name=crop_name, overwrite_hdf5=True,
autodelete=autodelete)
micro_crop.set_lattice(self.get_lattice())
print('cropping microstructure to %s' % micro_crop.h5_file)
# crop CellData fields
image_group = self.get_node('CellData')
FIndex_path = os.path.join(image_group._v_pathname,'Field_index')
field_list = self.get_node(FIndex_path)
for name in field_list:
fieldname = name.decode('utf-8')
spacing_array = self.get_attribute('spacing','CellData')
print('cropping field %s' % fieldname)
field = self.get_field(fieldname)
if not self._is_empty(fieldname):
if self._get_group_type('CellData') == '2DImage':
field_crop = field[x_start:x_end,y_start:y_end, ...]
else:
field_crop = field[x_start:x_end,y_start:y_end,
z_start:z_end, ...]
empty = micro_crop.get_attribute(attrname='empty',
nodename='CellData')
if empty:
micro_crop.add_image_from_field(
field_array=field_crop, fieldname=fieldname,
imagename='CellData', location='/',
spacing=spacing_array, replace=True)
else:
micro_crop.add_field(gridname='CellData',
fieldname=fieldname,
array=field_crop, replace=True)
if verbose:
print('Cropped dataset:')
print(micro_crop)
micro_crop.set_active_grain_map(self.active_grain_map)
grain_ids = np.unique(micro_crop.get_grain_map(as_numpy=True))
for gid in grain_ids:
if not gid > 0:
continue
grain = self.grains.read_where('idnumber == gid')
micro_crop.grains.append(grain)
print('%d grains in cropped microstructure' % micro_crop.grains.nrows)
micro_crop.grains.flush()
# recompute the grain geometry
if recompute_geometry:
print('updating grain geometry')
micro_crop.recompute_grain_bounding_boxes(verbose)
micro_crop.recompute_grain_centers(verbose)
micro_crop.recompute_grain_volumes(verbose)
return micro_crop
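    # Illustrative usage sketch (hypothetical bounds and names): crop a
    # 100x100x50 voxel sub-volume and keep the grain geometry up to date.
    #   crop = micro.crop(x_start=10, x_end=110, y_start=10, y_end=110,
    #                     z_start=0, z_end=50, crop_name='my_sample_crop')
    #   print(crop.get_number_of_grains())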
def sync_grain_table_with_grain_map(self, sync_geometry=False):
"""Update GrainDataTable with only grain IDs from active grain map.
:param bool sync_geometry: If `True`, recomputes the geometrical
parameters of the grains in the GrainDataTable from active grain
map.
"""
# Remove grains that are not in grain map from GrainDataTable
self.remove_grains_not_in_map()
# Add grains that are in grain map but not in GrainDataTable
self.add_grains_in_map()
if sync_geometry:
self.recompute_grain_bounding_boxes()
self.recompute_grain_centers()
self.recompute_grain_volumes()
return
def renumber_grains(self, sort_by_size=False, new_map_name=None,
only_grain_map=False):
"""Renumber the grains in the microstructure.
Renumber the grains from 1 to n, with n the total number of grains
that are found in the active grain map array, so that the numbering is
consecutive. Only positive grain ids are taken into account (the id 0
is reserved for the background).
:param bool sort_by_size: use the grain volume to sort the grain ids
(the larger grain will become grain 1, etc).
        :param str new_map_name: if not `None`, the renumbered grain map is
            added to CellData under this name; if `None`, the active grain
            map is overwritten with the renumbered map.
:param bool only_grain_map: If `True`, do not modify the grain map
and GrainDataTable in dataset, but return the renumbered grain_map
as a numpy array.
"""
if self._is_empty('grain_map'):
print('warning: a grain map is needed to renumber the grains')
return
self.sync_grain_table_with_grain_map()
# At this point, the table and the map have the same grain Ids
grain_map = self.get_grain_map(as_numpy=True)
grain_map_renum = grain_map.copy()
if sort_by_size:
print('sorting ids by grain size')
sizes = self.get_grain_volumes()
new_ids = self.get_grain_ids()[np.argsort(sizes)][::-1]
else:
new_ids = range(1, len(np.unique(grain_map)) + 1)
for i, g in enumerate(self.grains):
gid = g['idnumber']
if not gid > 0:
# only renumber positive grain ids
continue
new_id = new_ids[i]
grain_map_renum[grain_map == gid] = new_id
if not only_grain_map:
g['idnumber'] = new_id
g.update()
        print('maximum grain id is now %d' % max(new_ids))
if only_grain_map:
return grain_map_renum
# assign the renumbered grain_map to the microstructure
if new_map_name is None:
map_name = self.active_grain_map
else:
map_name = new_map_name
self.set_grain_map(grain_map_renum, self.get_voxel_size(),
map_name=map_name)
return
def compute_grain_volume(self, gid):
"""Compute the volume of the grain given its id.
The total number of voxels with the given id is computed. The value is
converted to mm unit using the `voxel_size`. The unit will be squared
mm for a 2D grain map or cubed mm for a 3D grain map.
.. warning::
            This function assumes the grain bounding box is correct, call
`recompute_grain_bounding_boxes()` if this is not the case.
:param int gid: the grain id to consider.
:return: the volume of the grain.
"""
bb = self.grains.read_where('idnumber == %d' % gid)['bounding_box'][0]
grain_map = self.get_grain_map()[bb[0][0]:bb[0][1],
bb[1][0]:bb[1][1],
bb[2][0]:bb[2][1]]
voxel_size = self.get_attribute('spacing', 'CellData')
volume_vx = np.sum(grain_map == np.array(gid))
return volume_vx * np.prod(voxel_size)
def compute_grain_center(self, gid):
"""Compute the center of masses of a grain given its id.
.. warning::
            This function assumes the grain bounding box is correct, call
`recompute_grain_bounding_boxes()` if this is not the case.
:param int gid: the grain id to consider.
:return: a tuple with the center of mass in mm units
(or voxel if the voxel_size is not specified).
"""
# isolate the grain within the complete grain map
bb = self.grains.read_where('idnumber == %d' % gid)['bounding_box'][0]
grain_map = self.get_grain_map()[bb[0][0]:bb[0][1],
bb[1][0]:bb[1][1],
bb[2][0]:bb[2][1]]
voxel_size = self.get_attribute('spacing', 'CellData')
if len(voxel_size) == 2:
voxel_size = np.concatenate((voxel_size,np.array([0])), axis=0)
offset = bb[:, 0]
grain_data_bin = (grain_map == gid).astype(np.uint8)
local_com = ndimage.measurements.center_of_mass(grain_data_bin) \
+ np.array([0.5, 0.5, 0.5]) # account for first voxel coordinates
com = voxel_size * (offset + local_com
- 0.5 * np.array(self.get_grain_map().shape))
return com
def compute_grain_bounding_box(self, gid, as_slice=False):
"""Compute the grain bounding box indices in the grain map.
:param int gid: the id of the grain.
:param bool as_slice: a flag to return the grain bounding box as a slice.
:return: the bounding box coordinates.
"""
slices = ndimage.find_objects(self.get_grain_map() == np.array(gid))[0]
if as_slice:
return slices
x_indices = (slices[0].start, slices[0].stop)
y_indices = (slices[1].start, slices[1].stop)
z_indices = (slices[2].start, slices[2].stop)
return x_indices, y_indices, z_indices
def compute_grain_equivalent_diameters(self, id_list=None):
"""Compute the equivalent diameter for a list of grains.
:param list id_list: the list of the grain ids to include (compute
for all grains by default).
:return: a 1D numpy array of the grain diameters.
"""
grain_equivalent_diameters = 2 * (3 * self.get_grain_volumes(id_list) /
4 / np.pi) ** (1 / 3)
return grain_equivalent_diameters
def compute_grain_sphericities(self, id_list=None):
"""Compute the equivalent diameter for a list of grains.
The sphericity measures how close to a sphere is a given grain.
It can be computed by the ratio between the surface area of a sphere
with the same volume and the actual surface area of that grain.
.. math::
\psi = \dfrac{\pi^{1/3}(6V)^{2/3}}{A}
:param list id_list: the list of the grain ids to include (compute
for all grains by default).
        :return: a 1D numpy array of the grain sphericities.
"""
volumes = self.get_grain_volumes(id_list)
if not id_list:
id_list = self.get_grain_ids()
grain_map = self.get_grain_map(as_numpy=True)
if len(grain_map.shape) < 3:
raise ValueError('Cannot compute grain sphericities on a non'
' tridimensional grain map.')
surface_areas = np.empty_like(volumes)
for i, grain_id in enumerate(id_list):
grain_data = (grain_map == grain_id)
surface_areas[i] = np.sum(grain_data - ndimage.morphology.binary_erosion(grain_data))
sphericities = np.pi ** (1 / 3) * (6 * volumes) ** (2 / 3) / surface_areas
return sphericities
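    # Quick sanity check of the sphericity formula: for a perfect sphere with
    # V = 4/3*pi*r^3 and A = 4*pi*r^2, psi = pi^(1/3)*(6V)^(2/3)/A = 1, and
    # voxelized grains give values below 1. A standalone check (no
    # Microstructure instance needed):
    #   import numpy as np
    #   r = 1.0
    #   V = 4 / 3 * np.pi * r ** 3
    #   A = 4 * np.pi * r ** 2
    #   print(np.pi ** (1 / 3) * (6 * V) ** (2 / 3) / A)  # -> 1.0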
def compute_grain_aspect_ratios(self, id_list=None):
"""Compute the aspect ratio for a list of grains.
The aspect ratio is defined by the ratio between the major and minor
axes of the equivalent ellipsoid of each grain.
:param list id_list: the list of the grain ids to include (compute
for all grains by default).
:return: a 1D numpy array of the grain aspect ratios.
"""
from skimage.measure import regionprops
props = regionprops(self.get_grain_map(as_numpy=True))
grain_aspect_ratios = np.array([prop.major_axis_length /
prop.minor_axis_length
for prop in props])
return grain_aspect_ratios
def recompute_grain_volumes(self, verbose=False):
"""Compute the volume of all grains in the microstructure.
Each grain volume is computed using the grain map. The value is
assigned to the volume column of the GrainDataTable node.
If the voxel size is specified, the grain centers will be in mm unit,
if not in voxel unit.
.. note::
A grain map need to be associated with this microstructure instance
for the method to run.
:param bool verbose: flag for verbose mode.
:return: a 1D array with all grain volumes.
"""
if self._is_empty('grain_map'):
print('warning: needs a grain map to recompute the volumes '
'of the grains')
return
for g in self.grains:
try:
vol = self.compute_grain_volume(g['idnumber'])
except ValueError:
print('skipping grain %d' % g['idnumber'])
continue
if verbose:
print('grain {}, computed volume is {}'.format(g['idnumber'],
vol))
g['volume'] = vol
g.update()
self.grains.flush()
return self.get_grain_volumes()
def recompute_grain_centers(self, verbose=False):
"""Compute and assign the center of all grains in the microstructure.
Each grain center is computed using its center of mass. The value is
assigned to the grain.center attribute. If the voxel size is specified,
the grain centers will be in mm unit, if not in voxel unit.
.. note::
A grain map need to be associated with this microstructure instance
for the method to run.
:param bool verbose: flag for verbose mode.
:return: a 1D array with all grain centers.
"""
if self._is_empty('grain_map'):
print('warning: need a grain map to recompute the center of mass'
' of the grains')
return
for g in self.grains:
try:
com = self.compute_grain_center(g['idnumber'])
except ValueError:
print('skipping grain %d' % g['idnumber'])
continue
if verbose:
print('grain %d center: %.3f, %.3f, %.3f'
% (g['idnumber'], com[0], com[1], com[2]))
g['center'] = com
g.update()
self.grains.flush()
return self.get_grain_centers()
def recompute_grain_bounding_boxes(self, verbose=False):
"""Compute and assign the center of all grains in the microstructure.
Each grain center is computed using its center of mass. The value is
assigned to the grain.center attribute. If the voxel size is specified,
the grain centers will be in mm unit, if not in voxel unit.
.. note::
A grain map need to be associated with this microstructure instance
for the method to run.
:param bool verbose: flag for verbose mode.
"""
if self._is_empty('grain_map'):
print('warning: need a grain map to recompute the bounding boxes'
' of the grains')
return
# find_objects will return a list of N slices with N being the max grain id
slices = ndimage.find_objects(self.get_grain_map(as_numpy=True))
for g in self.grains:
try:
g_slice = slices[g['idnumber'] - 1]
x_indices = (g_slice[0].start, g_slice[0].stop)
y_indices = (g_slice[1].start, g_slice[1].stop)
z_indices = (g_slice[2].start, g_slice[2].stop)
bbox = x_indices, y_indices, z_indices
except (ValueError, TypeError):
print('skipping grain %d' % g['idnumber'])
continue
if verbose:
print('grain %d bounding box: [%d:%d, %d:%d, %d:%d]'
% (g['idnumber'], bbox[0][0], bbox[0][1], bbox[1][0],
bbox[1][1], bbox[2][0], bbox[2][1]))
g['bounding_box'] = bbox
g.update()
self.grains.flush()
return self.get_grain_bounding_boxes()
def compute_grains_geometry(self, overwrite_table=False):
""" Compute grain centers, volume and bounding box from grain_map """
#TODO revisit this method as we now rely on the grain bounding boxes to compute the geometry
grains_id = self.get_ids_from_grain_map()
if self.grains.nrows > 0 and overwrite_table:
self.grains.remove_rows(start=0)
for i in grains_id:
gidx = self.grains.get_where_list('(idnumber == i)')
if len(gidx) > 0:
gr = self.grains[gidx]
else:
gr = np.zeros((1,), dtype=self.grains.dtype)
gr['bounding_box'] = self.compute_grain_bounding_box(i)
gr['center'] = self.compute_grain_center(i)
gr['volume'] = self.compute_grain_volume(i)
if len(gidx) > 0:
self.grains[gidx] = gr
else:
self.grains.append(gr)
self.grains.flush()
return
def compute_grains_map_table_intersection(self, verbose=False):
"""Return grains that are both in grain map and grain table.
The method also returns the grains that are in the grain map but not
in the grain table, and the grains that are in the grain table, but
not in the grain map.
:return array intersection: array of grain ids that are in both
grain map and grain data table.
:return array not_in_map: array of grain ids that are in grain data
table but not in grain map.
:return array not_in_table: array of grain ids that are in grain map
but not in grain data table.
"""
if self._is_empty('grain_map'):
print('warning: a grain map is needed to compute grains map'
'/table intersection.')
return
grain_map = self.get_grain_map(as_numpy=True)
map_ids = np.unique(grain_map)
# only positive integer values are considered as valid grain ids, remove everything else:
map_ids = np.delete(map_ids, range(0, np.where(map_ids > 0)[0][0]))
table_ids = self.get_grain_ids()
intersection = np.intersect1d(map_ids, table_ids)
not_in_map = table_ids[np.isin(table_ids, map_ids, invert=True,
assume_unique=True)]
not_in_table = map_ids[np.isin(map_ids, table_ids, invert=True,
assume_unique=True)]
if verbose:
print('Grains IDs both in grain map and GrainDataTable:')
print(str(intersection).strip('[]'))
print('Grains IDs in GrainDataTable but not in grain map:')
print(str(not_in_map).strip('[]'))
print('Grains IDs in grain map but not in GrainDataTable :')
print(str(not_in_table).strip('[]'))
return intersection, not_in_map, not_in_table
def build_grain_table_from_grain_map(self):
"""Synchronizes and recomputes GrainDataTable from active grain map."""
# First step: synchronize table with grain map
self.sync_grain_table_with_grain_map()
# Second step, recompute grain geometry
# TODO: use recompute_grain_geometry when method is corrected
self.recompute_grain_bounding_boxes()
self.recompute_grain_centers()
self.recompute_grain_volumes()
return
@staticmethod
    def voronoi(shape=(256, 256), n_grains=50):
        """Simple voronoi tesselation to create a grain map.
        The method works both in 2 and 3 dimensions. The grains are labeled
        from 1 to `n_grains` (included).
        :param tuple shape: grain map shape in 2 or 3 dimensions.
        :param int n_grains: number of grains to generate.
        :return: a numpy array grain_map
        """
        if len(shape) not in [2, 3]:
            raise ValueError('specified shape must be either 2D or 3D')
        # random seed positions in the same [-0.5, 0.5] domain as the grid
        seeds = np.random.rand(n_grains, len(shape)) - 0.5
        # build the voxel coordinates and assign each voxel to its nearest seed
        # (brute force distance computation, adequate for moderate grid sizes)
        axes = [np.linspace(-0.5, 0.5, n, endpoint=True) for n in shape]
        grids = np.meshgrid(*axes, indexing='ij')
        coords = np.stack([g.ravel() for g in grids], axis=1)
        distance = np.linalg.norm(coords[:, np.newaxis, :] - seeds[np.newaxis, :, :], axis=2)
        grain_map = (np.argmin(distance, axis=1) + 1).reshape(shape)
        return grain_map
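    # Illustrative usage sketch: generate a synthetic 2D grain map with 50
    # grains and attach it to a microstructure (names are hypothetical).
    #   grain_map = Microstructure.voronoi(shape=(128, 128), n_grains=50)
    #   micro = Microstructure(name='synthetic', overwrite_hdf5=True)
    #   micro.set_grain_map(grain_map, voxel_size=1.0)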
def to_amitex_fftp(self, binary=True, mat_file=True, elasaniso_path='',
add_grips=False, grip_size=10,
grip_constants=(104100., 49440.), add_exterior=False,
exterior_size=10, use_mask=False):
"""Write orientation data to ascii files to prepare for FFT computation.
AMITEX_FFTP can be used to compute the elastoplastic response of
polycrystalline microstructures. The calculation needs orientation data
for each grain written in the form of the coordinates of the first two
basis vectors expressed in the crystal local frame which is given by
the first two rows of the orientation matrix. The values are written
in 6 files N1X.txt, N1Y.txt, N1Z.txt, N2X.txt, N2Y.txt, N2Z.txt, each
containing n values with n the number of grains. The data is written
either in BINARY or in ASCII form.
Additional options exist to pad the grain map with two constant regions.
One region called grips can be added on the top and bottom (third axis).
The second region is around the sample (first and second axes).
:param bool binary: flag to write the files in binary or ascii format.
:param bool mat_file: flag to write the material file for Amitex.
:param str elasaniso_path: path for the libUmatAmitex.so in
the Amitex_FFTP installation.
:param bool add_grips: add a constant region at the beginning and the
end of the third axis.
:param int grip_size: thickness of the region.
:param tuple grip_constants: elasticity values for the grip (lambda, mu).
:param bool add_exterior: add a constant region around the sample at
the beginning and the end of the first two axes.
:param int exterior_size: thickness of the exterior region.
:param bool use_mask: use mask to define exterior material, and use
mask to extrude grips with same shapes as microstructure top and
bottom surfaces.
"""
n_phases = self.get_number_of_phases()
ext = 'bin' if binary else 'txt'
grip_id = n_phases # material id for the grips
ext_id = n_phases + 1 if add_grips else n_phases # material id for the exterior
n1x = open('N1X.%s' % ext, 'w')
n1y = open('N1Y.%s' % ext, 'w')
n1z = open('N1Z.%s' % ext, 'w')
n2x = open('N2X.%s' % ext, 'w')
n2y = open('N2Y.%s' % ext, 'w')
n2z = open('N2Z.%s' % ext, 'w')
files = [n1x, n1y, n1z, n2x, n2y, n2z]
if binary:
import struct
for f in files:
f.write('%d \ndouble \n' % self.get_number_of_grains())
f.close()
n1x = open('N1X.%s' % ext, 'ab')
n1y = open('N1Y.%s' % ext, 'ab')
n1z = open('N1Z.%s' % ext, 'ab')
n2x = open('N2X.%s' % ext, 'ab')
n2y = open('N2Y.%s' % ext, 'ab')
n2z = open('N2Z.%s' % ext, 'ab')
for g in self.grains:
o = Orientation.from_rodrigues(g['orientation'])
g = o.orientation_matrix()
n1 = g[0] # first row
n2 = g[1] # second row
n1x.write(struct.pack('>d', n1[0]))
n1y.write(struct.pack('>d', n1[1]))
n1z.write(struct.pack('>d', n1[2]))
n2x.write(struct.pack('>d', n2[0]))
n2y.write(struct.pack('>d', n2[1]))
n2z.write(struct.pack('>d', n2[2]))
else:
for g in self.grains:
o = Orientation.from_rodrigues(g['orientation'])
g = o.orientation_matrix()
n1 = g[0] # first row
n2 = g[1] # second row
n1x.write('%f\n' % n1[0])
n1y.write('%f\n' % n1[1])
n1z.write('%f\n' % n1[2])
n2x.write('%f\n' % n2[0])
n2y.write('%f\n' % n2[1])
n2z.write('%f\n' % n2[2])
n1x.close()
n1y.close()
n1z.close()
n2x.close()
n2y.close()
n2z.close()
print('orientation data written for AMITEX_FFTP')
# if required, write the material file for Amitex
if mat_file:
from lxml import etree, builder
root = etree.Element('Materials')
root.append(etree.Element('Reference_Material',
Lambda0='90000.0',
Mu0='31000.0'))
# add each phase as a material for Amitex
phase_ids = self.get_phase_ids_list()
phase_ids.sort()
# here phase_ids needs to be equal to [1, ..., n_phases]
if not phase_ids == list(range(1, n_phases + 1)):
raise ValueError('inconsistent phase numbering (should be from '
'1 to n_phases): {}'.format(phase_ids))
for phase_id in phase_ids:
mat = etree.Element('Material', numM=str(phase_id),
Lib=os.path.join(elasaniso_path, 'libUmatAmitex.so'),
Law='elasaniso')
# get the C_IJ values
phase = self.get_phase(phase_id)
C = phase.get_symmetry().stiffness_matrix(phase.elastic_constants)
'''
Note that Amitex uses a different reduced number:
(1, 2, 3, 4, 5, 6) = (11, 22, 33, 12, 13, 23)
Because of this indices 4 and 6 are inverted with respect to the Voigt convention.
'''
mat.append(etree.Element(_tag='Coeff', Index='1', Type='Constant', Value=str(C[0, 0]))) # C11
mat.append(etree.Element(_tag='Coeff', Index='2', Type='Constant', Value=str(C[0, 1]))) # C12
mat.append(etree.Element(_tag='Coeff', Index='3', Type='Constant', Value=str(C[0, 2]))) # C13
mat.append(etree.Element(_tag='Coeff', Index='4', Type='Constant', Value=str(C[1, 1]))) # C22
mat.append(etree.Element(_tag='Coeff', Index='5', Type='Constant', Value=str(C[1, 2]))) # C23
mat.append(etree.Element(_tag='Coeff', Index='6', Type='Constant', Value=str(C[2, 2]))) # C33
mat.append(etree.Element(_tag='Coeff', Index='7', Type='Constant', Value=str(C[5, 5]))) # C66
mat.append(etree.Element(_tag='Coeff', Index='8', Type='Constant', Value=str(C[4, 4]))) # C55
mat.append(etree.Element(_tag='Coeff', Index='9', Type='Constant', Value=str(C[3, 3]))) # C44
fmt = "binary" if binary else "ascii"
mat.append(etree.Element(_tag='Coeff', Index="10", Type="Constant_Zone", File="N1X.bin", Format=fmt))
mat.append(etree.Element(_tag='Coeff', Index="11", Type="Constant_Zone", File="N1Y.bin", Format=fmt))
mat.append(etree.Element(_tag='Coeff', Index="12", Type="Constant_Zone", File="N1Z.bin", Format=fmt))
mat.append(etree.Element(_tag='Coeff', Index="13", Type="Constant_Zone", File="N2X.bin", Format=fmt))
mat.append(etree.Element(_tag='Coeff', Index="14", Type="Constant_Zone", File="N2Y.bin", Format=fmt))
mat.append(etree.Element(_tag='Coeff', Index="15", Type="Constant_Zone", File="N2Z.bin", Format=fmt))
root.append(mat)
# add a material for top and bottom layers
if add_grips:
grips = etree.Element('Material', numM=str(grip_id + 1),
Lib=os.path.join(elasaniso_path, 'libUmatAmitex.so'),
Law='elasiso')
grips.append(etree.Element(_tag='Coeff', Index='1', Type='Constant', Value=str(grip_constants[0])))
grips.append(etree.Element(_tag='Coeff', Index='2', Type='Constant', Value=str(grip_constants[1])))
root.append(grips)
# add a material for external buffer
if add_exterior or use_mask:
exterior = etree.Element('Material', numM=str(ext_id + 1),
Lib=os.path.join(elasaniso_path, 'libUmatAmitex.so'),
Law='elasiso')
exterior.append(etree.Element(_tag='Coeff', Index='1', Type='Constant', Value='0.'))
exterior.append(etree.Element(_tag='Coeff', Index='2', Type='Constant', Value='0.'))
root.append(exterior)
tree = etree.ElementTree(root)
tree.write('mat.xml', xml_declaration=True, pretty_print=True,
encoding='UTF-8')
print('material file written in mat.xml')
# if possible, write the vtk file to run the computation
if self.__contains__('grain_map'):
# convert the grain map to vtk file
from vtk.util import numpy_support
#TODO adapt to 2D grain maps
#TODO build a continuous grain map for amitex
# grain_ids = self.get_grain_map(as_numpy=True)
grain_ids = self.renumber_grains(only_grain_map=True)
if not self._is_empty('phase_map'):
# use the phase map for the material ids
material_ids = self.get_phase_map(as_numpy=True)
elif use_mask:
material_ids = self.get_mask(as_numpy=True).astype(
grain_ids.dtype)
else:
material_ids = np.ones_like(grain_ids)
if add_grips:
# add a layer of new_id (the value must actually be the first
# grain id) above and below the sample.
grain_ids = np.pad(grain_ids, ((0, 0),
(0, 0),
(grip_size, grip_size)),
mode='constant', constant_values=1)
if use_mask:
# create top and bottom mask extrusions
mask_top = material_ids[:,:,[-1]]
mask_bot = material_ids[:,:,[0]]
top_grip = np.tile(mask_top, (1,1,grip_size))
                    bot_grip = np.tile(mask_bot, (1,1,grip_size))
# add grip layers to unit cell matID
material_ids = np.concatenate(
((grip_id+1)*bot_grip, material_ids,
(grip_id+1)*top_grip), axis=2)
else:
material_ids = np.pad(
material_ids, ((0, 0), (0, 0), (grip_size, grip_size)),
mode='constant', constant_values=grip_id+1)
if add_exterior and not use_mask:
# add a layer of new_id around the first two dimensions
grain_ids = np.pad(grain_ids, ((exterior_size, exterior_size),
(exterior_size, exterior_size),
(0, 0)),
mode='constant', constant_values=1)
material_ids = np.pad(material_ids,
((exterior_size, exterior_size),
(exterior_size, exterior_size),
(0, 0)),
mode='constant', constant_values=ext_id+1)
if use_mask:
grain_ids[np.where(grain_ids == 0)] = 1
material_ids[np.where(material_ids == 0)] = ext_id + 1
# write both arrays as VTK files for amitex
voxel_size = self.get_voxel_size()
for array, array_name in zip([grain_ids, material_ids],
['grain_ids', 'material_ids']):
print('array name:', array_name, 'array type:', array.dtype)
vtk_data_array = numpy_support.numpy_to_vtk(np.ravel(array,
order='F'),
deep=1)
vtk_data_array.SetName(array_name)
grid = vtk.vtkImageData()
size = array.shape
grid.SetExtent(0, size[0], 0, size[1], 0, size[2])
grid.GetCellData().SetScalars(vtk_data_array)
grid.SetSpacing(voxel_size, voxel_size, voxel_size)
writer = vtk.vtkStructuredPointsWriter()
writer.SetFileName('%s_%s.vtk' % (self.get_sample_name(),
array_name))
if binary:
writer.SetFileTypeToBinary()
writer.SetInputData(grid)
writer.Write()
print('%s array written in legacy vtk form for AMITEX_FFTP' %
array_name)
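    # Illustrative usage sketch (hypothetical library path): write orientation,
    # material and vtk files for an AMITEX_FFTP run with 10-voxel grips and an
    # exterior buffer around the sample.
    #   micro.to_amitex_fftp(binary=True, elasaniso_path='/path/to/amitex/materiaux',
    #                        add_grips=True, grip_size=10,
    #                        add_exterior=True, exterior_size=10)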
def from_amitex_fftp(self, results_basename, grip_size=0, ext_size=0,
grip_dim=2, sim_prefix='Amitex', int_var_names=dict(),
finite_strain=False, load_fields=True,
compression_options=dict()):
"""Read output of a Amitex_fftp simulation and stores in dataset.
Read a Amitex_fftp result directory containing a mechanical simulation
of the microstructure. See method 'to_amitex_fftp' to generate input
files for such simulation of Microstructure instances.
The results are stored as fields of the CellData group by default.
If generated by the simulation, the strain and stress tensor fields
are stored, as well as the internal variables fields.
Mechanical fields and macroscopical curves are stored. The latter is
stored in the data group '/Mechanical_simulation' as a structured
array.
.. Warning 1::
            For now, this method can store the results of several
            snapshots but without writing them as an xdmf time series. This
feature will be implemented in the future.
.. Warning 2::
For now, results are only stored on CellData group. Method will
            be modified in the future to allow specifying a new image data
            group to store the results (created if needed).
:param results_basename: Basename of Amitex .std, .vtk output files to
load in dataset.
:type results_basename: str
:param grip_size: Thickness of the grips added to simulation unit cell
by the method 'to_amitex_fftp' of this class, defaults to 0. This
value corresponds to a number of voxels on both ends of the cell.
:type grip_size: int, optional
:param grip_dim: Dimension along which the tension test has been
simulated (0:x, 1:y, 2:z)
:type grip_dim: int, optional
:param ext_size: Thickness of the exterior region added to simulation
unit cell by the method 'to_amitex_fftp' of this class,
defaults to 0. This value corresponds to a number of voxels on
both ends of the cell.
:type ext_size: int, optional
:param sim_prefix: Prefix of the name of the fields that will be
stored on the CellData group from simulation results.
:type sim_prefix: str, optional
        :param int_var_names: Dictionary whose keys are the names of
internal variables stored in Amitex output files
(varInt1, varInt2...) and values are corresponding names for
these variables in the dataset.
:type int_var_names: dict, optional
"""
# TODO: add grain map to all time steps
from pymicro.core.utils.SDAmitexUtils import SDAmitexIO
# Get std file result
p_mstd = Path(str(results_basename)).absolute().with_suffix('.mstd')
# safety check
if not p_mstd.exists():
raise ValueError('results not found, "results_basename" argument'
' not associated with Amitex_fftp simulation'
' results.')
# load .mstd results --> only lines of microstructure material mean
# values
step = 1
if grip_size > 0:
step += 1
        if ext_size > 0:
step += 1
elif not self._is_empty('mask'):
# if mask used as exterior in computation but exterior size = 0
            # still need to add 1 to step as .mstd will have three lines per
# increment
if np.any(self['mask'] == 0):
step += 1
std_res = SDAmitexIO.load_std(p_mstd, step=step)
# load .zstd results
p_zstd = Path(str(results_basename)+'_1').with_suffix('.zstd')
if p_zstd.exists():
zstd_res = SDAmitexIO.load_std(p_zstd, Int_var_names=int_var_names)
# Store macro data in specific group
self.add_group(groupname=f'{sim_prefix}_Results', location='/',
indexname='fft_sim', replace=True)
# std_res is a numpy structured array whose fields depend on
# the type of output (finite strain ou infinitesimal strain sim.)
# ==> we load it into the dataset as a structured table data item.
self.add_table(location='fft_sim', name='Standard_output',
indexname=f'{sim_prefix}_std',
description=std_res.dtype, data=std_res)
# idem for zstd --> Add as Mechanical Grain Data Table
if p_zstd.exists():
self.add_table(location='GrainData',
name='MechanicalGrainDataTable',
indexname='Mech_Grain_Data',
description=zstd_res.dtype, data=zstd_res)
grainIDs = self.get_grain_ids()
N_zone_times = int(zstd_res.shape[0]/ len(grainIDs))
            dtype_col = np.dtype([('grain_ID', np.int64)])
IDs = np.tile(grainIDs, N_zone_times).astype(dtype_col)
self.add_tablecols(tablename='MechanicalGrainDataTable',
description=IDs.dtype, data=IDs)
# End of macroscopic data loading. Check if field data must be loaded.
if not load_fields:
return
# Get vtk files results
Stress, Strain, VarInt, Incr_list = SDAmitexIO.load_amitex_output_fields(
results_basename, grip_size=grip_size, ext_size=ext_size,
grip_dim=2)
## Loop over time steps: create group to store results
self.add_group(groupname=f'{sim_prefix}_fields',
location='/CellData', indexname='fft_fields',
replace=True)
# Create CellData temporal subgrids for each time value with a vtk
# field output
time_values = std_res['time'][Incr_list].squeeze()
if len(time_values) == 0:
time_values = [0.]
self.add_grid_time('CellData', time_values)
# Add fields to CellData grid collections
for incr in Stress:
Time = std_res['time'][incr].squeeze()
fieldname = f'{sim_prefix}_stress'
self.add_field(gridname='CellData', fieldname=fieldname,
array=Stress[incr], location='fft_fields',
time=Time, compression_options=compression_options)
for incr in Strain:
Time = std_res['time'][incr].squeeze()
fieldname = f'{sim_prefix}_strain'
self.add_field(gridname='CellData', fieldname=fieldname,
array=Strain[incr], location='fft_fields',
time=Time, compression_options=compression_options)
for mat in VarInt:
for incr in VarInt[mat]:
Time = std_res['time'][incr].squeeze()
for var in VarInt[mat][incr]:
varname = var
if int_var_names.__contains__(var):
varname = int_var_names[var]
fieldname = f'{sim_prefix}_mat{mat}_{varname}'
self.add_field(gridname='CellData', fieldname=fieldname,
array=VarInt[mat][incr][var],
location='fft_fields', time=Time,
compression_options=compression_options)
return
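    # Illustrative usage sketch (hypothetical result basename): read the fields
    # and macroscopic curves of a finished simulation back into the dataset.
    #   micro.from_amitex_fftp('results/my_sample', grip_size=10,
    #                          sim_prefix='Amitex', load_fields=True)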
def print_zset_material_block(self, mat_file, grain_prefix='_ELSET'):
"""
Outputs the material block corresponding to this microstructure for
a finite element calculation with z-set.
:param str mat_file: The name of the file where the material behaviour
is located
:param str grain_prefix: The grain prefix used to name the elsets
corresponding to the different grains
"""
f = open('elset_list.txt', 'w')
# TODO : test
for g in self.grains:
o = Orientation.from_rodrigues(g['orientation'])
f.write(' **elset %s%d *file %s *integration '
'theta_method_a 1.0 1.e-9 150 *rotation '
'%7.3f %7.3f %7.3f\n' % (grain_prefix, g['idnumber'],
mat_file,
o.phi1(), o.Phi(), o.phi2()))
f.close()
return
def to_dream3d(self):
"""Write the microstructure as a hdf5 file compatible with DREAM3D."""
import time
f = h5py.File('%s.h5' % self.get_sample_name(), 'w')
f.attrs['FileVersion'] = np.string_('7.0')
f.attrs['DREAM3D Version'] = np.string_('6.1.77.d28a796')
f.attrs['HDF5_Version'] = h5py.version.hdf5_version
f.attrs['h5py_version'] = h5py.version.version
f.attrs['file_time'] = time.time()
# pipeline group (empty here)
pipeline = f.create_group('Pipeline')
pipeline.attrs['Number_Filters'] = np.int32(0)
# create the data container group
data_containers = f.create_group('DataContainers')
m = data_containers.create_group('DataContainer')
# ensemble data
ed = m.create_group('EnsembleData')
ed.attrs['AttributeMatrixType'] = np.uint32(11)
ed.attrs['TupleDimensions'] = np.uint64(2)
cryst_structure = ed.create_dataset('CrystalStructures',
data=np.array([[999], [1]],
dtype=np.uint32))
cryst_structure.attrs['ComponentDimensions'] = np.uint64(1)
cryst_structure.attrs['DataArrayVersion'] = np.int32(2)
cryst_structure.attrs['ObjectType'] = np.string_('DataArray<uint32_t>')
cryst_structure.attrs['Tuple Axis Dimensions'] = np.string_('x=2')
cryst_structure.attrs['TupleDimensions'] = np.uint64(2)
mat_name = ed.create_dataset('MaterialName',
data=[a.encode('utf8')
for a in ['Invalid Phase', 'Unknown']])
mat_name.attrs['ComponentDimensions'] = np.uint64(1)
mat_name.attrs['DataArrayVersion'] = np.int32(2)
mat_name.attrs['ObjectType'] = np.string_('StringDataArray')
mat_name.attrs['Tuple Axis Dimensions'] = np.string_('x=2')
mat_name.attrs['TupleDimensions'] = np.uint64(2)
# feature data
fd = m.create_group('FeatureData')
fd.attrs['AttributeMatrixType'] = np.uint32(7)
fd.attrs['TupleDimensions'] = np.uint64(self.grains.nrows)
        orientations = [Orientation.from_rodrigues(g['orientation'])
                        for g in self.grains]
        Euler = np.array([[o.phi1(), o.Phi(), o.phi2()] for o in orientations],
                         dtype=np.float32)
avg_euler = fd.create_dataset('AvgEulerAngles', data=Euler)
avg_euler.attrs['ComponentDimensions'] = np.uint64(3)
avg_euler.attrs['DataArrayVersion'] = np.int32(2)
avg_euler.attrs['ObjectType'] = np.string_('DataArray<float>')
avg_euler.attrs['Tuple Axis Dimensions'] = np.string_('x=%d' %
self.grains.nrows)
avg_euler.attrs['TupleDimensions'] = np.uint64(self.grains.nrows)
# geometry
geom = m.create_group('_SIMPL_GEOMETRY')
geom.attrs['GeometryType'] = np.uint32(999)
geom.attrs['GeometryTypeName'] = np.string_('UnkownGeometry')
# create the data container bundles group
f.create_group('DataContainerBundles')
f.close()
@staticmethod
def from_dream3d(file_path, main_key='DataContainers',
data_container='DataContainer', grain_data='FeatureData',
grain_orientations='AvgEulerAngles',
orientation_type='euler', grain_centroid='Centroids'):
"""Read a microstructure from a hdf5 file.
:param str file_path: the path to the hdf5 file to read.
:param str main_key: the string describing the root key.
:param str data_container: the string describing the data container
group in the hdf5 file.
:param str grain_data: the string describing the grain data group in the
hdf5 file.
:param str grain_orientations: the string describing the average grain
orientations in the hdf5 file.
:param str orientation_type: the string describing the descriptor used
for orientation data.
:param str grain_centroid: the string describing the grain centroid in
the hdf5 file.
:return: a `Microstructure` instance created from the hdf5 file.
"""
head, tail = os.path.split(file_path)
micro = Microstructure(name=tail, file_path=head, overwrite_hdf5=True)
with h5py.File(file_path, 'r') as f:
grain_data_path = '%s/%s/%s' % (main_key, data_container, grain_data)
orientations = f[grain_data_path][grain_orientations].value
if grain_centroid:
centroids = f[grain_data_path][grain_centroid].value
offset = 0
if len(centroids) < len(orientations):
offset = 1 # if grain 0 has not a centroid
grain = micro.grains.row
for i in range(len(orientations)):
grain['idnumber'] = i
if orientations[i, 0] == 0. and orientations[i, 1] == 0. and \
orientations[i, 2] == 0.:
# skip grain 0 which is always (0., 0., 0.)
print('skipping (0., 0., 0.)')
continue
if orientation_type == 'euler':
grain['orientation'] = Orientation.from_euler(
orientations[i] * 180 / np.pi).rod
elif orientation_type == 'rodrigues':
grain['orientation'] = Orientation.from_rodrigues(
orientations[i]).rod
if grain_centroid:
grain['center'] = centroids[i - offset]
grain.append()
micro.grains.flush()
return micro
@staticmethod
def copy_sample(src_micro_file, dst_micro_file, overwrite=False,
get_object=False, dst_name=None, autodelete=False):
""" Initiate a new SampleData object and files from existing one"""
SampleData.copy_sample(src_micro_file, dst_micro_file, overwrite,
new_sample_name=dst_name)
if get_object:
return Microstructure(filename=dst_micro_file,
autodelete=autodelete)
else:
return
@staticmethod
def from_neper(neper_file_path):
"""Create a microstructure from a neper tesselation.
        Neper is an open source program to generate polycrystalline
        microstructures using voronoi tessellations. It is available at
https://neper.info
:param str neper_file_path: the path to the tesselation file generated
by Neper.
:return: a pymicro `Microstructure` instance.
"""
neper_file = neper_file_path.split(os.sep)[-1]
neper_dir = os.path.dirname(neper_file_path)
print('creating microstructure from Neper tesselation %s' % neper_file)
name, ext = os.path.splitext(neper_file)
print(name, ext)
filename = os.path.join(neper_dir, name)
assert ext == '.tesr' # assuming raster tesselation
micro = Microstructure(name=name, filename=filename, overwrite_hdf5=True)
with open(neper_file_path, 'r', encoding='latin-1') as f:
line = f.readline() # ***tesr
# look for **general
while True:
line = f.readline().strip() # get rid of unnecessary spaces
if line.startswith('**general'):
break
dim = f.readline().strip()
print(dim)
dims = np.array(f.readline().split()).astype(int).tolist()
print(dims)
voxel_size = np.array(f.readline().split()).astype(float).tolist()
print(voxel_size)
# look for **cell
while True:
line = f.readline().strip()
if line.startswith('**cell'):
break
n = int(f.readline().strip())
print('microstructure contains %d grains' % n)
f.readline() # *id
grain_ids = []
# look for *ori
while True:
line = f.readline().strip()
if line.startswith('*ori'):
break
else:
grain_ids.extend(np.array(line.split()).astype(int).tolist())
print('grain ids are:', grain_ids)
oridescriptor = f.readline().strip() # must be euler-bunge:passive
if oridescriptor != 'euler-bunge:passive':
print('Wrong orientation descriptor: %s, must be '
'euler-bunge:passive' % oridescriptor)
grain = micro.grains.row
for i in range(n):
euler_angles = np.array(f.readline().split()).astype(float).tolist()
print('adding grain %d' % grain_ids[i])
grain['idnumber'] = grain_ids[i]
grain['orientation'] = Orientation.from_euler(euler_angles).rod
grain.append()
micro.grains.flush()
# look for **data and handle *group if present
phase_ids = None
while True:
line = f.readline().strip()
if line.startswith('*group'):
print('multi phase sample')
phase_ids = []
while True:
line = f.readline().strip()
if line.startswith('**data'):
break
else:
phase_ids.extend(np.array(line.split()).astype(int).tolist())
print('phase ids are:', phase_ids)
if line.startswith('**data'):
break
print(f.tell())
print('reading data from byte %d' % f.tell())
data = np.fromfile(f, dtype=np.uint16)[:-4] # leave out the last 4 values
print(data.shape)
assert np.prod(dims) == data.shape[0]
micro.set_grain_map(data.reshape(dims[::-1]).transpose(2, 1, 0),
voxel_size=voxel_size[0]) # swap X/Z axes
print('updating grain geometry')
micro.recompute_grain_bounding_boxes()
micro.recompute_grain_centers()
micro.recompute_grain_volumes()
# if necessary set the phase_map
if phase_ids:
grain_map = micro.get_grain_map(as_numpy=True)
phase_map = np.zeros_like(grain_map)
for grain_id, phase_id in zip(grain_ids, phase_ids):
# ignore phase id == 1 as this corresponds to phase_map == 0
if phase_id > 1:
phase_map[grain_map == grain_id] = phase_id - 1
micro.set_phase_map(phase_map)
print('done')
return micro
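    # Illustrative usage sketch (hypothetical file name): build a microstructure
    # from a raster tessellation (.tesr) written by Neper.
    #   micro = Microstructure.from_neper('n100-id1.tesr')
    #   print(micro.get_number_of_grains())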
@staticmethod
def from_labdct(labdct_file, data_dir='.', include_IPF_map=False,
include_rodrigues_map=False):
"""Create a microstructure from a DCT reconstruction.
:param str labdct_file: the name of the file containing the labDCT data.
:param str data_dir: the path to the folder containing the HDF5
reconstruction file.
:param bool include_IPF_map: if True, the IPF maps will be included
in the microstructure fields.
:param bool include_rodrigues_map: if True, the rodrigues map will be
included in the microstructure fields.
:return: a `Microstructure` instance created from the labDCT
reconstruction file.
"""
file_path = os.path.join(data_dir, labdct_file)
print('creating microstructure for labDCT scan %s' % file_path)
name, ext = os.path.splitext(labdct_file)
# get the phase data
with h5py.File(file_path, 'r') as f:
#TODO handle multiple phases
phase01 = f['PhaseInfo']['Phase01']
phase_name = phase01['Name'][0].decode('utf-8')
parameters = phase01['UnitCell'][()] # length unit is angstrom
a, b, c = parameters[:3] / 10 # use nm unit
alpha, beta, gamma = parameters[3:]
print(parameters)
sym = Lattice.guess_symmetry_from_parameters(a, b, c, alpha, beta, gamma)
print('found %s symmetry' % sym)
lattice = Lattice.from_parameters(a, b, c, alpha, beta, gamma, symmetry=sym)
phase = CrystallinePhase(phase_id=1, name=phase_name, lattice=lattice)
# create the microstructure with the phase infos
micro = Microstructure(name=name, path=data_dir, overwrite_hdf5=True, phase=phase)
# load cell data
with h5py.File(file_path, 'r') as f:
spacing = f['LabDCT']['Spacing'][0]
rodrigues_map = f['LabDCT']['Data']['Rodrigues'][()].transpose(2, 1, 0, 3)
grain_map = f['LabDCT']['Data']['GrainId'][()].transpose(2, 1, 0)
print('adding cell data with shape {}'.format(grain_map.shape))
micro.set_grain_map(grain_map, voxel_size=spacing)
mask = f['LabDCT']['Data']['Mask'][()].transpose(2, 1, 0)
micro.set_mask(mask, voxel_size=spacing)
phase_map = f['LabDCT']['Data']['PhaseId'][()].transpose(2, 1, 0)
micro.set_phase_map(phase_map, voxel_size=spacing)
if include_IPF_map:
IPF001_map = f['LabDCT']['Data']['IPF001'][()].transpose(2, 1, 0, 3)
micro.add_field(gridname='CellData', fieldname='IPF001_map',
array=IPF001_map)
IPF010_map = f['LabDCT']['Data']['IPF010'][()].transpose(2, 1, 0, 3)
micro.add_field(gridname='CellData', fieldname='IPF010_map',
array=IPF010_map)
IPF100_map = f['LabDCT']['Data']['IPF100'][()].transpose(2, 1, 0, 3)
micro.add_field(gridname='CellData', fieldname='IPF100_map',
array=IPF100_map)
if include_rodrigues_map:
micro.add_field(gridname='CellData', fieldname='rodrigues_map',
array=rodrigues_map)
# create grain data table infos
grain_ids = np.unique(grain_map)
print(grain_ids)
micro.build_grain_table_from_grain_map()
# now get each grain orientation from the rodrigues map
for i, g in enumerate(micro.grains):
gid = g['idnumber']
            progress = 100 * (1 + i) / len(micro.grains)
print('adding grains: {0:.2f} %'.format(progress), end='\r')
x, y, z = np.where(micro.get_grain_map() == gid)
orientation = rodrigues_map[x[0], y[0], z[0]]
# assign orientation to this grain
g['orientation'] = orientation
g.update()
micro.grains.flush()
return micro
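    # Illustrative usage sketch (hypothetical file names): import a labDCT
    # reconstruction together with the IPF maps.
    #   micro = Microstructure.from_labdct('scan_6_1.h5', data_dir='/path/to/data',
    #                                      include_IPF_map=True)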
@staticmethod
def from_dct(data_dir='.', grain_file='index.mat',
vol_file='phase_01_vol.mat', mask_file='volume_mask.mat',
use_dct_path=True, verbose=True):
"""Create a microstructure from a DCT reconstruction.
DCT reconstructions are stored in several files. The indexed grain
informations are stored in a matlab file in the '4_grains/phase_01'
folder. Then, the reconstructed volume file (labeled image) is stored
in the '5_reconstruction' folder as an hdf5 file, possibly stored
alongside a mask file coming from the absorption reconstruction.
:param str data_dir: the path to the folder containing the
reconstruction data.
:param str grain_file: the name of the file containing grains info.
:param str vol_file: the name of the volume file.
:param str mask_file: the name of the mask file.
:param bool use_dct_path: if True, the grain_file should be located in
4_grains/phase_01 folder and the vol_file and mask_file in the
5_reconstruction folder.
:param bool verbose: activate verbose mode.
:return: a `Microstructure` instance created from the DCT reconstruction.
"""
if data_dir == '.':
data_dir = os.getcwd()
if data_dir.endswith(os.sep):
data_dir = data_dir[:-1]
scan = data_dir.split(os.sep)[-1]
print('creating microstructure for DCT scan %s' % scan)
filename = os.path.join(data_dir,scan)
micro = Microstructure(filename=filename, overwrite_hdf5=True)
micro.data_dir = data_dir
if use_dct_path:
index_path = os.path.join(data_dir, '4_grains', 'phase_01',
grain_file)
else:
index_path = os.path.join(data_dir, grain_file)
print(index_path)
if not os.path.exists(index_path):
            raise ValueError('%s not found, please specify a valid path to the'
                             ' grain file.' % index_path)
from scipy.io import loadmat
index = loadmat(index_path)
#TODO fetch pixel size from detgeo instead
voxel_size = index['cryst'][0][0][25][0][0]
# grab the crystal lattice
lattice_params = index['cryst'][0][0][3][0]
sym = Symmetry.from_string(index['cryst'][0][0][7][0])
print('creating crystal lattice {} ({}) with parameters {}'
''.format(index['cryst'][0][0][0][0], sym, lattice_params))
lattice_params[:3] /= 10 # angstrom to nm
lattice = Lattice.from_parameters(*lattice_params, symmetry=sym)
micro.set_lattice(lattice)
# add all grains to the microstructure
grain = micro.grains.row
for i in range(len(index['grain'][0])):
grain['idnumber'] = index['grain'][0][i][0][0][0][0][0]
grain['orientation'] = index['grain'][0][i][0][0][3][0]
grain['center'] = index['grain'][0][i][0][0][15][0]
grain.append()
micro.grains.flush()
# load the grain map if available
if use_dct_path:
grain_map_path = os.path.join(data_dir, '5_reconstruction',
vol_file)
else:
grain_map_path = os.path.join(data_dir, vol_file)
if os.path.exists(grain_map_path):
with h5py.File(grain_map_path, 'r') as f:
# because how matlab writes the data, we need to swap X and Z
# axes in the DCT volume
micro.set_grain_map(f['vol'][()].transpose(2, 1, 0), voxel_size)
if verbose:
print('loaded grain ids volume with shape: {}'
''.format(micro.get_grain_map().shape))
# load the mask if available
if use_dct_path:
mask_path = os.path.join(data_dir, '5_reconstruction', mask_file)
else:
mask_path = os.path.join(data_dir, mask_file)
if os.path.exists(mask_path):
try:
with h5py.File(mask_path, 'r') as f:
mask = f['vol'][()].transpose(2, 1, 0).astype(np.uint8)
# check if mask shape needs to be zero padded
if not mask.shape == micro.get_grain_map().shape:
offset = np.array(micro.get_grain_map().shape) - np.array(mask.shape)
padding = [(o // 2, o // 2) for o in offset]
print('mask padding is {}'.format(padding))
mask = np.pad(mask, padding, mode='constant')
print('now mask shape is {}'.format(mask.shape))
micro.set_mask(mask, voxel_size)
except:
# fallback on matlab format
micro.set_mask(loadmat(mask_path)['vol'], voxel_size)
if verbose:
print('loaded mask volume with shape: {}'.format(micro.get_mask().shape))
return micro
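    # Illustrative usage sketch (hypothetical scan folder): import a DCT
    # reconstruction using the standard 4_grains/5_reconstruction folder layout.
    #   micro = Microstructure.from_dct(data_dir='/path/to/scan_folder',
    #                                   use_dct_path=True, verbose=True)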
@staticmethod
def from_legacy_h5(file_path, filename=None):
"""read a microstructure object from a HDF5 file created by pymicro
until version 0.4.5.
:param str file_path: the path to the file to read.
:return: the new `Microstructure` instance created from the file.
"""
with h5py.File(file_path, 'r') as f:
if filename is None:
filename = f.attrs['microstructure_name']
micro = Microstructure(name=filename, overwrite_hdf5=True)
if 'symmetry' in f['EnsembleData/CrystalStructure'].attrs:
sym = f['EnsembleData/CrystalStructure'].attrs['symmetry']
parameters = f['EnsembleData/CrystalStructure/LatticeParameters'][()]
micro.set_lattice(Lattice.from_symmetry(Symmetry.from_string(sym),
parameters))
if 'data_dir' in f.attrs:
micro.data_dir = f.attrs['data_dir']
# load feature data
if 'R_vectors' in f['FeatureData']:
print('some grains')
avg_rods = f['FeatureData/R_vectors'][()]
print(avg_rods.shape)
if 'grain_ids' in f['FeatureData']:
grain_ids = f['FeatureData/grain_ids'][()]
else:
grain_ids = range(1, 1 + avg_rods.shape[0])
if 'centers' in f['FeatureData']:
centers = f['FeatureData/centers'][()]
else:
centers = np.zeros_like(avg_rods)
# add all grains to the microstructure
grain = micro.grains.row
for i in range(avg_rods.shape[0]):
grain['idnumber'] = grain_ids[i]
grain['orientation'] = avg_rods[i, :]
grain['center'] = centers[i]
grain.append()
micro.grains.flush()
# load cell data
if 'grain_ids' in f['CellData']:
micro.set_grain_map(f['CellData/grain_ids'][()],
f['CellData/grain_ids'].attrs['voxel_size'])
micro.recompute_grain_bounding_boxes()
micro.recompute_grain_volumes()
if 'mask' in f['CellData']:
micro.set_mask(f['CellData/mask'][()],
f['CellData/mask'].attrs['voxel_size'])
return micro
@staticmethod
def from_ebsd(file_path, roi=None, tol=5., min_ci=0.2):
""""Create a microstructure from an EBSD scan.
:param str file_path: the path to the file to read.
:param list roi: a list of 4 integers in the form [x1, x2, y1, y2]
to crop the EBSD scan.
:param float tol: the misorientation angle tolerance to segment
the grains (default is 5 degrees).
:param float min_ci: minimum confidence index for a pixel to be a valid
EBSD measurement.
:return: a new instance of `Microstructure`.
"""
# Get name of file and create microstructure instance
name = os.path.splitext(os.path.basename(file_path))[0]
micro = Microstructure(name=name, autodelete=False, overwrite_hdf5=True)
from pymicro.crystal.ebsd import OimScan
# Read raw EBSD .h5 data file from OIM
scan = OimScan.from_file(file_path)
micro.set_phases(scan.phase_list)
if roi:
print('importing data from region {}'.format(roi))
scan.cols = roi[1] - roi[0]
scan.rows = roi[3] - roi[2]
scan.iq = scan.iq[roi[0]:roi[1], roi[2]:roi[3]]
scan.ci = scan.ci[roi[0]:roi[1], roi[2]:roi[3]]
scan.euler = scan.euler[roi[0]:roi[1], roi[2]:roi[3], :]
# change the orientation reference frame to XYZ
scan.change_orientation_reference_frame()
iq = scan.iq
ci = scan.ci
euler = scan.euler
mask = np.ones_like(iq)
# segment the grains
grain_ids = scan.segment_grains(tol=tol, min_ci=min_ci)
voxel_size = np.array([scan.xStep, scan.yStep])
micro.set_grain_map(grain_ids, voxel_size)
# add each array to the data file to the CellData image Group
micro.add_field(gridname='CellData', fieldname='mask', array=mask,
replace=True)
micro.add_field(gridname='CellData', fieldname='iq', array=iq,
replace=True)
micro.add_field(gridname='CellData', fieldname='ci', array=ci,
replace=True)
micro.add_field(gridname='CellData', fieldname='euler',
array=euler, replace=True)
# Fill GrainDataTable
grains = micro.grains.row
grain_ids_list = np.unique(grain_ids).tolist()
for gid in grain_ids_list:
if gid == 0:
continue
progress = 100 * (1 + grain_ids_list.index(gid)) / len(grain_ids_list)
print('creating new grains [{:.2f} %]: adding grain {:d}'.format(
progress, gid), end='\r')
# get the symmetry for this grain
            phase_grain = scan.phase[np.where(grain_ids == gid)]
assert len(np.unique(phase_grain)) == 1 # all pixel of this grain must have the same phase id by
# construction
grain_phase_id = phase_grain[0]
sym = scan.phase_list[grain_phase_id].get_symmetry()
# compute the mean orientation for this grain
            euler_grain = scan.euler[np.where(grain_ids == gid)]
import numpy as np
class Spectrum(object):
"""A useful container for nuclear engineering applications.
This description will be updated in the future."""
def __init__(self, edges, values, error, form='int', floor=0):
"""Create a spectrum object and its associated values.
Also calculates some useful things."""
# check array inputs
assert type(edges) in (list, tuple, np.ndarray), "Bin edges must be of type list, tuple, or ndarray."
assert type(values) in (list, tuple, np.ndarray), "Values must be of type list, tuple, or ndarray."
assert type(error) in (int, list, tuple, np.ndarray), "Error must be of type int, list, tuple, or ndarray."
assert type(floor) in (int, float), "Floor must be either int or float."
# check spectrum form
message = "Spectrum form option must be literal in ('int', 'integral', 'dif', 'diff', 'differential')."
assert form in ('int', 'integral', 'dif', 'diff', 'differential'), message
# allow error to be input as 0, which implies that all error is zero
if type(error) is int:
            assert error == 0, "Error integer input only allows 0."
# then rewrite error
error = np.zeros(len(values))
# check val and err lengths
message = "Values and error must have same length."
assert len(values) == len(error), message
# check edges and val lengths
message = "Inconsistency in number of edges/values."
assert len(edges) in (len(values), len(values) + 1), message
# guarantee that bin edges are unique and increasing or decreasing
message = "Bin edge values must be unique and strictly increasing or decreasing."
assert all(x < y for x, y in zip(edges, edges[1:])) or all(x > y for x, y in zip(edges, edges[1:])), message
# convert all data types to numpy arrays
values = np.array(values)
error = np.array(error)
# special handling for different size edges
if len(edges) == len(values):
edges = np.concatenate((np.array([floor]), np.array(edges)))
elif len(edges) == len(values) + 1:
edges = np.array(edges)
# calculate midpoints and bin edges
self.widths = edges[1:] - edges[:-1]
self.midpoints = (edges[1:] + edges[:-1]) / 2
# store other primary values
self.edges = edges
# store values and error in differential and integral forms
if form in ('int', 'integral'):
self.int = values
self.int_error = error
self.diff = self.int / self.widths
self.diff_error = self.int_error / self.widths
elif form in ('dif', 'diff', 'differential'):
self.diff = values
self.diff_error = error
self.int = self.diff * self.widths
self.int_error = self.diff_error * self.widths
return
def __add__(self):
raise NotImplementedError
def __radd__(self):
raise NotImplementedError
def __sub__(self):
raise NotImplementedError
def __rsub__(self):
raise NotImplementedError
def __mul__(self):
raise NotImplementedError
def __rmul__(self):
raise NotImplementedError
def __div__(self):
raise NotImplementedError
def __rdiv__(self):
raise NotImplementedError
def plot(self, plot_type, form):
"""This function will return the arguments necessary for plotting
the data with both plt.plot and plt.errorbar in both integral and
differential form."""
# check inputs
message = "Plot type must be literal of either plot or errorbar."
assert plot_type in ('plot', 'errorbar'), message
message = "Spectrum form option must be literal in ('int', 'integral', 'dif', 'diff', 'differential')."
assert form in ('int', 'integral', 'dif', 'diff', 'differential'), message
# step looking data on a plt.plot
if plot_type == 'plot':
# create doubles of bin edges
X = np.array([[xx, xx] for xx in np.array(self.edges)]).flatten()[1:-1]
# plotting the integral form
if form in ('int', 'integral'):
Y = np.array([[yy, yy] for yy in np.array(self.int)]).flatten()
# plotting the differential form
elif form in ('dif', 'diff', 'differential'):
Y = np.array([[yy, yy] for yy in np.array(self.diff)]).flatten()
# return the x and y points
return X, Y
# for errorbars in the appropriate locations
elif plot_type == 'errorbar':
# errorbars on the integral form
if form in ('int', 'integral'):
return self.midpoints, self.int, self.int_error
# errorbars on the differential form
elif form in ('dif', 'diff', 'differential'):
return self.midpoints, self.diff, self.diff_error
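# Illustrative usage sketch (hypothetical data): build a spectrum from integral
# values and draw it as a step plot with error bars using matplotlib.
#   import matplotlib.pyplot as plt
#   spec = Spectrum(edges=[1, 2, 5, 10], values=[3., 5., 2.], error=0, form='int')
#   X, Y = spec.plot('plot', 'diff')
#   mid, val, err = spec.plot('errorbar', 'diff')
#   plt.plot(X, Y)
#   plt.errorbar(mid, val, yerr=err, fmt='none')
#   plt.show()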
class Spectrum2D(object):
"""A useful container for nuclear engineering applications.
This description will be updated in the future."""
def __init__(self, xedges, yedges, values, error, form='int', floor=(0, 0)):
"""Create a 2D verion of the spectrum object and its associated values.
Also calculates some useful things."""
# check array inputs
assert type(xedges) in (list, tuple, np.ndarray), "Bin edges must be of type list, tuple, or ndarray."
assert type(yedges) in (list, tuple, np.ndarray), "Bin edges must be of type list, tuple, or ndarray."
assert type(values) in (list, tuple, np.ndarray), "Values must be of type list, tuple, or ndarray."
assert type(error) in (int, list, tuple, np.ndarray), "Error must be of type int, list, tuple, or ndarray."
        assert type(floor) in (list, tuple, np.ndarray), "Floor must be of type list, tuple, or ndarray."
# check spectrum form
message = "Spectrum form option must be literal in ('int', 'integral', 'dif', 'diff', 'differential')."
assert form in ('int', 'integral', 'dif', 'diff', 'differential'), message
# allow error to be input as 0, which implies that all error is zero
if type(error) is int:
            assert error == 0, "Error integer input only allows 0."
# then rewrite error
error = np.zeros((len(values), len(values[0])))
# check
if type(values) in (list, tuple):
message = "Issue with shape of values."
assert all([len(values[i]) == len(values[i + 1]) for i in range(len(values) - 1)]), message
# check val and err shapes
message = "Values and error must have same shape."
assert len(values) == len(error), message
assert len(values[0]) == len(error[0]), message
# check edges and val shapes
message = "Inconsistency in number of x edges/values."
assert len(xedges) in (len(values), len(values) + 1), message
message = "Inconsistency in number of y edges/values."
assert len(yedges) in (len(values[0]), len(values[0]) + 1), message
# guarantee that bin edges are unique and increasing or decreasing
message = "Bin edge values must be unique and strictly increasing or decreasing."
assert all(x < y for x, y in zip(xedges, xedges[1:])) or all(x > y for x, y in zip(xedges, xedges[1:])), message
assert all(x < y for x, y in zip(yedges, yedges[1:])) or all(x > y for x, y in zip(yedges, yedges[1:])), message
# convert all data types to numpy arrays
values = np.array(values)
error = np.array(error)
# special handling for different size edges
if len(xedges) == values.shape[0]:
xedges = np.concatenate((np.array([floor[0]]), np.array(xedges)))
elif len(xedges) == len(values) + 1:
xedges = np.array(xedges)
# do the same for y edges
if len(yedges) == values.shape[1]:
yedges = np.concatenate(( | np.array([floor[1]]) | numpy.array |
import pytest
import brightwind as bw
import pandas as pd
import numpy as np
import datetime
wndspd = 8
wndspd_df = pd.DataFrame([2, 13, np.NaN, 5, 8])
wndspd_series = pd.Series([2, 13, np.NaN, 5, 8])
current_slope = 0.045
current_offset = 0.235
new_slope = 0.046
new_offset = 0.236
wndspd_adj = 8.173555555555556
wndspd_adj_df = pd.DataFrame([2.0402222222222224, 13.284666666666668, np.NaN, 5.106888888888888, 8.173555555555556])
wndspd_adj_series = pd.Series([2.0402222222222224, 13.284666666666668, np.NaN, 5.106888888888888, 8.173555555555556])
DATA = bw.load_campbell_scientific(bw.demo_datasets.demo_campbell_scientific_data)
DATA_CLND = bw.apply_cleaning(DATA, bw.demo_datasets.demo_cleaning_file)
STATION = bw.MeasurementStation(bw.demo_datasets.demo_wra_data_model)
DATA_ADJUSTED = bw.load_csv(bw.demo_datasets.demo_data_adjusted_for_testing)
WSPD_COLS = ['Spd80mN', 'Spd80mS', 'Spd60mN', 'Spd60mS', 'Spd40mN', 'Spd40mS']
WDIR_COLS = ['Dir78mS', 'Dir58mS', 'Dir38mS']
MERRA2 = bw.load_csv(bw.demo_datasets.demo_merra2_NE)
def np_array_equal(a, b):
# nan's don't compare so use this instead
try:
np.testing.assert_equal(a, b)
except AssertionError:
return False
return True
def test_selective_avg():
date_today = datetime.datetime(2019, 6, 1)
days = pd.date_range(date_today, date_today + datetime.timedelta(24), freq='D')
data = pd.DataFrame({'DTM': days})
data = data.set_index('DTM')
data['Spd1'] = [1, np.NaN, 1, 1, 1, 1, 1, 1, 1, np.NaN, 1, 1, 1, 1, np.NaN, 1, 1, np.NaN, 1, 1, 1, 1, np.NaN, 1, 1]
data['Spd2'] = [2, 2, np.NaN, 2, 2, 2, 2, 2, np.NaN, 2, 2, 2, 2, np.NaN, 2, 2, 2, np.NaN, 2, 2, 2, 2, 2, np.NaN, 2]
data['Dir'] = [0, 15, 30, 45, np.NaN, 75, 90, 105, 120, 135, 150, 165, 180, 195, 210, 225, 240, 255, 270, 285, 300,
315, np.NaN, 345, 360]
# Test Case 1: Neither boom is near 0-360 crossover
result = np.array([1.5, 2, 1, 1.5, 1.5, 1.5, 1.5, 2, 1, 2, 2, 2, 1.5, 1, 2, 1.5, 1.5, np.NaN,
1.5, 1, 1, 1, 2, 1, 1.5])
bw.selective_avg(data[['Spd1']], data[['Spd2']], data[['Dir']],
boom_dir_1=315, boom_dir_2=135, sector_width=60)
sel_avg = np.array(bw.selective_avg(data.Spd1, data.Spd2, data.Dir,
boom_dir_1=315, boom_dir_2=135, sector_width=60))
assert np_array_equal(sel_avg, result)
# Test Case 2: Boom 1 is near 0-360 crossover
result = np.array([1.0, 2.0, 1.0, 1.0, 1.5, 1.5, 1.5, 1.5, 1.0, 2.0, 1.5, 1.5, 2.0, 1.0, 2.0, 2.0, 1.5, np.NaN,
1.5, 1.5, 1.5, 1.5, 2.0, 1.0, 1.0])
sel_avg = np.array(bw.selective_avg(data.Spd1, data.Spd2, data.Dir,
boom_dir_1=20, boom_dir_2=200, sector_width=60))
assert np_array_equal(sel_avg, result)
# Test Case 3: Boom 2 is near 0-360 crossover
result = np.array([2.0, 2.0, 1.0, 1.5, 1.5, 1.5, 1.5, 1.5, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.5, 1.5, np.NaN,
1.5, 1.5, 1.5, 1.5, 2.0, 1.0, 2.0])
sel_avg = np.array(bw.selective_avg(data.Spd1, data.Spd2, data.Dir,
boom_dir_1=175, boom_dir_2=355, sector_width=60))
assert np_array_equal(sel_avg, result)
# Test Case 4: Booms at 90 deg to each other
result = np.array([1.0, 2.0, 1.0, 1.5, 1.5, 2.0, 2.0, 2.0, 1.0, 2.0, 1.5, 1.5, 1.5, 1.0, 2.0, 1.5, 1.5, np.NaN,
1.5, 1.5, 1.5, 1.5, 2.0, 1.0, 1.0])
sel_avg = np.array(bw.selective_avg(data.Spd1, data.Spd2, data.Dir,
boom_dir_1=270, boom_dir_2=180, sector_width=60))
assert np_array_equal(sel_avg, result)
# Test Case 5: Sectors overlap error msg
with pytest.raises(ValueError) as except_info:
bw.selective_avg(data.Spd1, data.Spd2, data.Dir, boom_dir_1=180, boom_dir_2=185, sector_width=60)
assert str(except_info.value) == "Sectors overlap! Please check your inputs or reduce the size of " \
"your 'sector_width'."
def test_adjust_slope_offset_single_value():
assert wndspd_adj == bw.adjust_slope_offset(wndspd, current_slope, current_offset, new_slope, new_offset)
def test_adjust_slope_offset_df():
assert wndspd_adj_df.equals(bw.adjust_slope_offset(wndspd_df, current_slope, current_offset, new_slope, new_offset))
def test_adjust_slope_offset_series():
assert wndspd_adj_series.equals(bw.adjust_slope_offset(wndspd_series, current_slope,
current_offset, new_slope, new_offset))
def test_adjust_slope_offset_arg_str():
# check error msg if a string is sent as one of the slope or offset arguments
with pytest.raises(TypeError) as except_info:
bw.adjust_slope_offset(wndspd, current_slope, current_offset, '0.046', new_offset)
assert str(except_info.value) == "argument '0.046' is not of data type number"
def test_adjust_slope_offset_arg_wndspd_str():
# check error msg if a string is sent as the wind speed argument
with pytest.raises(TypeError) as except_info:
bw.adjust_slope_offset('8', current_slope, current_offset, new_slope, new_offset)
assert str(except_info.value) == "wspd argument is not of data type number"
def test_adjust_slope_offset_arg_wndspd_list():
# check error msg if a list is sent as the wind speed argument
with pytest.raises(TypeError) as except_info:
bw.adjust_slope_offset([2, 3, 4, 5], current_slope, current_offset, new_slope, new_offset)
assert str(except_info.value) == "wspd argument is not of data type number"
def test_adjust_slope_offset_arg_wndspd_df_str():
# check error msg if a string is an element in the pandas DataFrame
with pytest.raises(TypeError) as except_info:
bw.adjust_slope_offset(pd.DataFrame([2, 3, '4', 5]), current_slope, current_offset, new_slope, new_offset)
assert str(except_info.value) == "some values in the DataFrame are not of data type number"
def test_adjust_slope_offset_arg_wndspd_series_str():
# check error msg if a string is an element in the pandas DataFrame
with pytest.raises(TypeError) as except_info:
bw.adjust_slope_offset(pd.Series([2, 3, '4', 5]), current_slope, current_offset, new_slope, new_offset)
assert str(except_info.value) == "some values in the Series are not of data type number"
def test_apply_wspd_slope_offset_adj():
data = bw.apply_wspd_slope_offset_adj(DATA, STATION.measurements)
assert((DATA_ADJUSTED[WSPD_COLS].fillna(0).round(5) ==
data[WSPD_COLS].fillna(0).round(5)).all()).all()
wspd_cols_that_work = ['Spd80mN', 'Spd80mS', 'Spd60mN', 'Spd40mN']
data2 = bw.apply_wspd_slope_offset_adj(DATA[wspd_cols_that_work], STATION.measurements)
assert((DATA_ADJUSTED[wspd_cols_that_work].fillna(0).round(5) ==
data2[wspd_cols_that_work].fillna(0).round(5)).all()).all()
data1 = bw.apply_wspd_slope_offset_adj(DATA['Spd60mS'], STATION.measurements['Spd60mS'])
assert (data1.fillna(0).round(10) ==
DATA_ADJUSTED['Spd60mS'].fillna(0).round(10)).all()
def test_offset_wind_direction_float():
wdir_offset = float(5)
assert wdir_offset == bw.offset_wind_direction(float(20), 345)
def test_offset_wind_direction_df():
wdir_df_offset = pd.DataFrame([355, 15, np.NaN, 25, 335])
assert wdir_df_offset.equals(bw.offset_wind_direction(pd.DataFrame([10, 30, np.NaN, 40, 350]), 345))
def test_offset_wind_direction_series():
wdir_series_offset = pd.Series([355, 15, np.NaN, 25, 335])
assert wdir_series_offset.equals(bw.offset_wind_direction(pd.Series([10, 30, np.NaN, 40, 350]), 345))
def test_apply_wind_vane_dead_band_offset():
data1 = bw.apply_wind_vane_deadband_offset(DATA['Dir78mS'], STATION.measurements)
data = bw.apply_wind_vane_deadband_offset(DATA, STATION.measurements)
assert((DATA_ADJUSTED[WDIR_COLS].fillna(0).round(10) ==
data[WDIR_COLS].fillna(0).round(10)).all()).all()
assert (data1.fillna(0).round(10) ==
data['Dir78mS'].fillna(0).round(10)).all()
def test_freq_str_to_timedelta():
periods = ['1min', '5min', '10min', '15min',
'1H', '3H', '6H',
'1D', '7D', '1W', '2W',
'1MS', '1M', '3M', '6MS',
'1AS', '1A', '3A']
results = [60.0, 300.0, 600.0, 900.0,
3600.0, 10800.0, 21600.0,
86400.0, 604800.0, 604800.0, 1209600.0,
2629746.0, 2629746.0, 7889238.0, 15778476.0,
31536000.0, 31536000.0, 94608000.0]
for idx, period in enumerate(periods):
assert bw.transform.transform._freq_str_to_timedelta(period).total_seconds() == results[idx]
def test_round_timestamp_down_to_averaging_prd():
timestamp = pd.Timestamp('2016-01-09 11:21:11')
avg_periods = ['10min', '15min', '1H', '3H', '6H', '1D', '7D', '1W', '1MS', '1AS']
avg_period_start_timestamps = ['2016-1-9 11:20:00', '2016-1-9 11:15:00', '2016-1-9 11:00:00',
'2016-1-9 9:00:00', '2016-1-9 6:00:00', '2016-1-9', '2016-1-9', '2016-1-9',
'2016-1', '2016']
for idx, avg_period in enumerate(avg_periods):
assert avg_period_start_timestamps[idx] == \
bw.transform.transform._round_timestamp_down_to_averaging_prd(timestamp, avg_period)
def test_get_data_resolution():
import warnings
series1 = DATA['Spd80mS'].index
assert bw.transform.transform._get_data_resolution(series1).seconds == 600
series2 = pd.date_range('2010-01-01', periods=150, freq='H')
assert bw.transform.transform._get_data_resolution(series2).seconds == 3600
series1 = bw.average_data_by_period(DATA['Spd80mN'], period='1M', coverage_threshold=0, return_coverage=False)
assert bw.transform.transform._get_data_resolution(series1.index) == pd.Timedelta(1, unit='M')
series1 = bw.average_data_by_period(DATA['Spd80mN'], period='1AS', coverage_threshold=0, return_coverage=False)
assert bw.transform.transform._get_data_resolution(series1.index) == pd.Timedelta(365, unit='D')
# hourly series with one instance where difference between adjacent timestamps is 10 min
series3 = pd.date_range('2010-04-15', '2010-05-01', freq='H').union(pd.date_range('2010-05-01 00:10:00', periods=20,
freq='H'))
with warnings.catch_warnings(record=True) as w:
assert bw.transform.transform._get_data_resolution(series3).seconds == 3600
assert len(w) == 1
def test_offset_timestamps():
series1 = DATA['2016-01-10 00:00:00':]
# sending index with no start end
bw.offset_timestamps(series1.index, offset='90min')
# sending index with start end
op = bw.offset_timestamps(series1.index, offset='2min', date_from='2016-01-10 00:10:00')
assert op[0] == pd.to_datetime('2016-01-10 00:00:00')
assert op[1] == pd.to_datetime('2016-01-10 00:12:00')
op = bw.offset_timestamps(series1.index, '2min', date_to='2016-01-10 00:30:00')
assert op[3] == pd.to_datetime('2016-01-10 00:32:00')
assert op[4] == pd.to_datetime('2016-01-10 00:40:00')
op = bw.offset_timestamps(series1.index, '3min', date_from='2016-01-10 00:10:00', date_to='2016-01-10 00:30:00')
assert op[0] == pd.to_datetime('2016-01-10 00:00:00')
assert op[1] == pd.to_datetime('2016-01-10 00:13:00')
assert op[5] == pd.to_datetime('2016-01-10 00:50:00')
op = bw.offset_timestamps(series1.index, '10min', date_from='2016-01-10 00:10:00', date_to='2016-01-10 00:30:00')
assert op[0] == series1.index[0]
assert op[1] == series1.index[2]
# sending DataFrame with datetime index
op = bw.offset_timestamps(series1, offset='-10min', date_from='2016-01-10 00:20:00')
assert (op.iloc[1] == series1.iloc[1]).all()
assert len(op) + 1 == len(series1)
assert (op.loc['2016-01-10 00:40:00'] == series1.loc['2016-01-10 00:50:00']).all()
op = bw.offset_timestamps(series1, offset='-10min', date_from='2016-01-10 00:20:00', overwrite=True)
assert (op.loc['2016-01-10 00:10:00'] == series1.loc['2016-01-10 00:20:00']).all()
op = bw.offset_timestamps(series1, '10min', date_from='2016-01-10 00:10:00', date_to='2016-01-10 00:30:00')
assert (op.loc['2016-01-10 00:20:00'] == series1.loc['2016-01-10 00:10:00']).all()
assert (op.loc['2016-01-10 00:40:00'] == series1.loc['2016-01-10 00:40:00']).all()
assert len(op) + 1 == len(series1)
op = bw.offset_timestamps(series1, '10min', date_from='2016-01-10 00:10:00', date_to='2016-01-10 00:30:00',
overwrite=True)
assert (op.loc['2016-01-10 00:40:00'] == series1.loc['2016-01-10 00:30:00']).all()
assert len(op) + 1 == len(series1)
# sending Series with datetime index
op = bw.offset_timestamps(series1.Spd60mN, offset='-10min', date_from='2016-01-10 00:20:00')
assert (op.iloc[1] == series1.Spd60mN.iloc[1]).all()
assert len(op) + 1 == len(series1.Spd60mN)
assert (op.loc['2016-01-10 00:40:00'] == series1.Spd60mN.loc['2016-01-10 00:50:00']).all()
op = bw.offset_timestamps(series1.Spd60mN, offset='-10min', date_from='2016-01-10 00:20:00', overwrite=True)
assert (op.loc['2016-01-10 00:10:00'] == series1.Spd60mN.loc['2016-01-10 00:20:00']).all()
op = bw.offset_timestamps(series1.Spd60mN, '10min', date_from='2016-01-10 00:10:00', date_to='2016-01-10 00:30:00')
assert (op.loc['2016-01-10 00:20:00'] == series1.Spd60mN.loc['2016-01-10 00:10:00']).all()
assert (op.loc['2016-01-10 00:40:00'] == series1.Spd60mN.loc['2016-01-10 00:40:00']).all()
assert len(op) + 1 == len(series1.Spd60mN)
op = bw.offset_timestamps(series1.Spd60mN, '10min', date_from='2016-01-10 00:10:00', date_to='2016-01-10 00:30:00',
overwrite=True)
assert (op.loc['2016-01-10 00:40:00'] == series1.Spd60mN.loc['2016-01-10 00:30:00']).all()
assert len(op) + 1 == len(series1.Spd60mN)
def test_average_wdirs():
wdirs = np.array([350, 10])
assert bw.average_wdirs(wdirs) == 0.0
wdirs = np.array([0, 180])
assert bw.average_wdirs(wdirs) is np.NaN
wdirs = | np.array([90, 270]) | numpy.array |
# ------------------------------------------------------------------------------
# Global imports
# ------------------------------------------------------------------------------
from typing import List
from typing import Optional
from pathlib import Path
import torch as pt
import numpy as np
from mpath.layer import Retina
from mpath.layer import Dense
class Network:
"""
A stateful network consisting of a sensor layer and one or more deeper layers.
"""
def __init__(
self,
_layer_sizes: List[int],
_layer_tau: Optional[List[float]] = None,
_learn: bool = True,
_min_weight: float = -1.0,
_max_weight: float = 1.0,
_learning_rate: float = 0.05,
_keep_activations: bool = False,
_keep_weights: bool = False,
):
"""
Initialise a network with a retinal layer and a number of deeper layers.
"""
assert _layer_tau is None or len(_layer_tau) == len(
_layer_sizes
), f"==[ Error: You have to provide either all or none of the membrane time constants."
# Create the retina
self.retina = Retina(
_size=_layer_sizes[0],
_tau=_layer_tau[0] if _layer_tau else None,
_activation_history=_keep_activations,
)
# Create layers.
# Retinal layers have ON and OFF cells, so they are actually twice the size of the input variables.
self.layers = []
input_size = 2 * _layer_sizes[0]
for idx, layer_size in enumerate(_layer_sizes[1:]):
self.layers.append(
Dense(
size=layer_size,
input_size=input_size,
                    tau=None if _layer_tau is None else _layer_tau[idx + 1],  # _layer_tau[0] is already used by the retina
min_weight=_min_weight,
max_weight=_max_weight,
learn=_learn,
learning_rate=_learning_rate,
activation_history=_keep_activations,
weight_history=_keep_weights,
)
)
input_size = layer_size
# Indicate if we want the network to learn
self.learn: bool = _learn
# Learning rate
self.learning_rate: float = _learning_rate
# Input history
self.input_history: Optional[List] = [] if _keep_activations else None
# Weight history
self.weight_history: Optional[pt.Tensor] = (
_keep_weights if _keep_weights else None
)
def integrate(
self,
_input_signals: pt.Tensor,
):
"""
Integrate input signals and propagate activations through the network.
"""
if self.input_history is not None:
self.input_history.append(_input_signals.squeeze_().numpy())
# Capture inputs with the retina
self.retina.integrate(_input_signals)
_input_signals = self.retina.activations
# Propagate the inputs through the layers
for layer in self.layers:
layer.integrate(_input_signals)
_input_signals = layer.activations
def freeze(self):
"""
Stop learning.
"""
for layer in self.layers:
layer.learn = False
def unfreeze(self):
"""
Resume learning.
"""
for layer in self.layers:
layer.learn = True
def _params(self):
"""
Get the network parameters.
"""
params = {}
params[f"lr"] = np.array([self.learning_rate])
params[f"tau_ret"] = self.retina.tau.numpy()
params[f"size_ret"] = | np.array(self.retina.activations.shape) | numpy.array |
from pnuml.learner.base_learner import BaseLearner
import numpy as np
import pprint
class Regression(BaseLearner):
def __init__(self, weight, activation, **kwargs):
"""
configure regression model
param weight : weight **w** of the model
param activation : can be ReLU or Sigmoid or TanH
"""
super().__init__()
self.weight = weight
self.activation = activation
def train_on_batch(self, batch_loader, epoch, lr=1e-2, lr_decay=1e-9):
"""
train weight parameter of the model with given dataset generator.
learing rate can be decayed with given lr_decay arguments.
whole training process iterated with count epoch.
"""
if self.weight.shape[-1] != batch_loader.x_shape[-1] + 1:
raise ValueError("weight and input x shape + pad(1) must matched\n"
"Weight shape {}, input x shape {}".format(
self.weight.shape, batch_loader.x_shape))
loss = []
for _ in range(epoch):
for (x, y) in batch_loader:
x = np.pad(x, ((0, 0), (1, 0)), 'constant', constant_values=1)
g = np.matmul(self.weight, x.T)
h_w = self.activation.forward(g)
loss.append(np.sum(y * np.log(h_w) +
(1. - y) * np.log(1. - h_w)))
self.weight = self.weight + lr * (y - h_w) * x
lr -= lr * lr_decay
return loss
def predict_on_batch(self, batch_loader):
"""
make prediction with trained weight value to the given
test dataset generator.
return prediction probabilities.
"""
prediction = []
for (x, y) in batch_loader:
x = np.pad(x, ((0, 0), (1, 0)), 'constant', constant_values=1)
g = | np.matmul(self.weight, x.T) | numpy.matmul |
# -*- coding: utf-8 -*-
"""Siamese Network for performing training of a Deep Convolutional
Network for Face Verification on the Olivetti and LFW Faces datasets.
Dependencies:
python 3.4+, numpy>=1.10.4, sklearn>=0.17, scipy>=0.17.0, theano>=0.7.0, lasagne>=0.1, cv2, dlib>=18.18 (only required if using the 'trees' crop mode).
Part of the package siamese_net:
siamese_net/
siamese_net/faces.py
siamese_net/datasets.py
siamese_net/normalization.py
siamese_net/siamese_net.py
Copyright 2016 Kadenze, Inc.
Kadenze(R) and Kannu(R) are Registered Trademarks of Kadenze, Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
"""
import sys
import pickle
import os
# base_compiledir = os.path.expandvars("$HOME/.theano/slot-%d" % (os.getpid()))
# os.environ['THEANO_FLAGS'] = "base_compiledir=%s" % base_compiledir
import matplotlib.pyplot as plt
import numpy as np
import theano
import theano.tensor as T
import time
import lasagne
# For training the final output network
from sklearn import cross_validation
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
# Custom code for parsing datasets and normalizing images
from datasets import Datasets
from normalization import LCN, ZCA
# plt.style.use('ggplot')
theano.config.floatX = 'float32'
def montage(x):
if x.shape[1] == 1 or x.shape[1] == 3:
num_img = x.shape[0]
num_img_per_dim = np.ceil(np.sqrt(num_img)).astype(int)
montage_img = np.zeros((
num_img_per_dim * x.shape[3],
num_img_per_dim * x.shape[2], x.shape[1]))
else:
num_img_per_dim = np.ceil(np.sqrt(x.shape[1])).astype(int)
montage_img = np.zeros((
num_img_per_dim * x.shape[3],
num_img_per_dim * x.shape[2]))
num_img = x.shape[1]
for img_i in range(num_img_per_dim):
for img_j in range(num_img_per_dim):
if img_i * num_img_per_dim + img_j < num_img:
if x.shape[0] == 1:
montage_img[
img_i * x.shape[3]: (img_i + 1) * x.shape[2],
img_j * x.shape[3]: (img_j + 1) * x.shape[2]
] = np.squeeze(np.squeeze(
x[0, img_i * num_img_per_dim + img_j, ...]
) / (np.max(x[0, img_i * num_img_per_dim + img_j, ...]
) + 1e-15))
else:
montage_img[
img_i * x.shape[3]: (img_i + 1) * x.shape[2],
img_j * x.shape[3]: (img_j + 1) * x.shape[2],
:
] = np.swapaxes(np.squeeze(
x[img_i * num_img_per_dim + img_j, ...]
) / (np.max(x[img_i * num_img_per_dim + img_j, ...]
) + 1e-15), 0, 2)
return montage_img
def get_image_manifold(images, features, res=64, n_neighbors=5):
'''Creates a montage of the images based on a TSNE
manifold of the associated image features.
'''
from sklearn import manifold
mapper = manifold.SpectralEmbedding()
transform = mapper.fit_transform(features)
nx = int(np.ceil(np.sqrt(len(transform))))
ny = int(np.ceil(np.sqrt(len(transform))))
montage_img = np.zeros((res * nx, res * ny, 3))
from sklearn.neighbors import NearestNeighbors
nn = NearestNeighbors()
nn.fit(transform)
min_x = np.mean(transform[:, 0]) - np.std(transform[:, 0]) * 3.0
max_x = np.mean(transform[:, 0]) + np.std(transform[:, 0]) * 3.0
min_y = np.mean(transform[:, 1]) - np.std(transform[:, 1]) * 3.0
max_y = np.mean(transform[:, 1]) + np.std(transform[:, 1]) * 3.0
for n_i in range(nx):
for n_j in range(ny):
x = min_x + (max_x - min_x) / nx * n_i
y = min_y + (max_y - min_y) / ny * n_j
            idx = nn.kneighbors([[x, y]], n_neighbors=n_neighbors)[1][0][:]
for neighbor_i in idx:
montage_img[
n_i * res: (n_i + 1) * res, n_j * res: (n_j + 1) * res, :] += images[neighbor_i]
montage_img[
n_i * res: (n_i + 1) * res, n_j * res: (n_j + 1) * res, :] /= float(len(idx))
montage_img = montage_img / np.max(montage_img)
return montage_img
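# Illustrative usage sketch (not part of the original file): random images and
# feature vectors are stand-ins just to show the expected input/output shapes;
# real use would pass face crops and their learned feature embeddings.
def _example_image_manifold():
    rng = np.random.RandomState(0)
    images = rng.rand(20, 64, 64, 3)       # 20 RGB thumbnails at res=64
    features = rng.rand(20, 40)            # one 40-d feature vector per image
    montage_img = get_image_manifold(images, features, res=64, n_neighbors=3)
    print(montage_img.shape)               # (res * nx, res * ny, 3)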
def make_image_pairs(X, y, unique_labels):
    '''For each person in unique_labels (P people, N images each):
    1. combine all matched pairs of that person's images:
       N_matched = P * (N choose 2)
    2. combine all imposter pairs across people:
       N_unmatched = (P choose 2) * (N * N)
    Returns
    -------
    X_matched, y_matched, X_unmatched, y_unmatched
    where each X has shape [N_pairs x 2 x W x H]
    (2 being the two images in the pair) and each y has shape [N_pairs,].
    Args
    ----
    X : ndarray [num_images x channels x W x H]
        The images to pair up.
    y : ndarray [num_images,]
        The person label of each image.
    unique_labels : ndarray
        The set of person labels to draw pairs from.
    '''
from itertools import combinations
X_pairs_matched = list()
y_pairs_matched = list()
    # Iterate over all matched (same-person) pairs,
    # e.g. 32 people * (10 images choose 2) = 32 * 45 = 1440 matched pairs
for person in unique_labels:
# Find images of those people
im_idx = np.where(person == y)[0]
for el in combinations(im_idx, 2):
X_pairs_matched.append(
np.concatenate((X[el[0], ...], X[el[1], ...]),
axis=0)[np.newaxis, ...])
y_pairs_matched.append(1)
X_pairs_unmatched = list()
y_pairs_unmatched = list()
# Iterate over all imposter pairs of people
# (32 choose 2 = 496 people pairs. 496 * 10 * 10 image pairs =
# 49600 imposter pairs)
# (157 * 0.4 = 63), 63 choose 2 = 1953, 1953 * 100 = 195300
for pair in combinations(unique_labels, 2):
# Find images of those people
im1_idx = np.where(pair[0] == y)[0]
im2_idx = np.where(pair[1] == y)[0]
for im1_idx_it in im1_idx:
for im2_idx_it in im2_idx:
X_pairs_unmatched.append(np.concatenate(
(X[im1_idx_it, ...], X[im2_idx_it, ...]),
axis=0)[np.newaxis, ...])
y_pairs_unmatched.append(0)
return (np.concatenate(X_pairs_matched),
np.array(y_pairs_matched),
np.concatenate(X_pairs_unmatched),
np.array(y_pairs_unmatched))
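# Small worked example (not in the original file) of the pair counts that
# make_image_pairs produces: with P=3 people and N=2 images each there are
# P * C(N, 2) = 3 matched pairs and C(P, 2) * N * N = 12 imposter pairs.
def _example_make_image_pairs():
    X = np.zeros((6, 1, 8, 8))                    # 6 images, 1 channel, 8x8
    y = np.array([0, 0, 1, 1, 2, 2])              # 3 people, 2 images each
    Xm, ym, Xu, yu = make_image_pairs(X, y, np.unique(y))
    print(Xm.shape, ym.shape)                     # (3, 2, 8, 8) (3,)
    print(Xu.shape, yu.shape)                     # (12, 2, 8, 8) (12,)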
def make_image_pair_idxs(y, unique_labels):
    '''For each person in unique_labels (P people, N images each):
    1. combine all matched pairs of that person's images:
       N_matched = P * (N choose 2)
    2. combine all imposter pairs across people:
       N_unmatched = (P choose 2) * (N * N)
    Returns
    -------
    X_matched, y_matched, X_unmatched, y_unmatched
    where each X has shape [N_pairs x 2] (the 2 being the indices into X
    defining the two images in the pair) and each y has shape [N_pairs,].
    Args
    ----
    y : ndarray [num_images,]
        The person label of each image.
    unique_labels : ndarray
        The set of person labels to draw pairs from.
    '''
from itertools import combinations
X_pairs_matched = list()
y_pairs_matched = list()
    # Iterate over all matched (same-person) pairs,
    # e.g. 32 people * (10 images choose 2) = 32 * 45 = 1440 matched pairs
for person in unique_labels:
# Find images of those people
im_idx = np.where(person == y)[0]
for el in combinations(im_idx, 2):
X_pairs_matched.append(np.array([el[0], el[1]])[np.newaxis, ...])
y_pairs_matched.append(1)
X_pairs_unmatched = list()
y_pairs_unmatched = list()
# Iterate over all imposter pairs of people
# (32 choose 2 = 496 people pairs. 496 * 10 * 10 image pairs = 49600 imposter pairs)
# (157 * 0.4 = 63), 63 choose 2 = 1953, 1953 * 100 = 195300
for pair_i, pair in enumerate(combinations(unique_labels, 2)):
# Find images of those people
im1_idx = np.where(pair[0] == y)[0]
im2_idx = | np.where(pair[1] == y) | numpy.where |
from typing import List, Tuple, Dict
import argparse
from scipy.ndimage import fourier_shift, shift
from skimage.feature import register_translation, masked_register_translation
from skimage.transform import rescale
from skimage import io
from shutil import move
from tqdm import tqdm
from parseConfig import parseConfig
import torch
import random
import pandas as pd
import numpy as np
import glob
import os
import gc
import logging
logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO)
DEBUG = 0
# Set the data data directory
# Download data at https://kelvins.esa.int/proba-v-super-resolution/data/
DATA_BANK_DIRECTORY = '/home/mark/DataBank/probav_data/'
DATA_BANK_DIRECTORY_PREPROCESSING_CHKPT = '/home/mark/DataBank/PROBA-V-CHKPT'
LOSS_CROP_BORDER = 3
def parser():
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', default='cfg/p16t9c85r12.cfg', type=str)
parser.add_argument('--band', default='NIR', type=str)
parser.add_argument('--dir', type=str, default='/home/mark/DataBank/probav_data/')
parser.add_argument('--ckptdir', default='/home/mark/DataBank/PROBA-V-CHKPT', type=str)
parser.add_argument('--numTopClearest', type=int, default=9)
parser.add_argument('--patchSizeLR', type=int, default=16) # base patch size is 32
parser.add_argument('--patchStrideLR', type=int, default=16)
parser.add_argument('--clarityThresholdLR', type=float, default=0.85)
parser.add_argument('--clarityThresholdHR', type=float, default=0.85)
parser.add_argument('--numPermute', type=int, default=19)
parser.add_argument('--toPad', type=bool, default=True)
parser.add_argument('--ckpt', type=int, nargs='+', default=[1, 2, 3, 4, 5])
opt = parser.parse_args()
return opt
def main(config):
rawDataDir = opt.dir
cleanDataDir = opt.ckptdir
band = opt.band
arrayDir = os.path.join(cleanDataDir, 'arrayDir')
trimmedArrayDir = os.path.join(cleanDataDir, 'trimmedArrayDir')
patchesDir = os.path.join(cleanDataDir, 'patchesDir')
trimmedPatchesDir = os.path.join(cleanDataDir, 'trimmedPatchesDir')
augmentedPatchesDir = os.path.join(cleanDataDir, 'augmentedPatchesDir')
# Check validity of directories
if not os.path.exists(arrayDir):
os.makedirs(arrayDir)
if not os.path.exists(trimmedArrayDir):
os.makedirs(trimmedArrayDir)
if not os.path.exists(patchesDir):
os.makedirs(patchesDir)
if not os.path.exists(trimmedPatchesDir):
os.makedirs(trimmedPatchesDir)
if not os.path.exists(augmentedPatchesDir):
os.makedirs(augmentedPatchesDir)
# CHECKPOINT 1 - RAW DATA LOAD AND SAVE
if 1 in opt.ckpt:
# Train
logging.info('Loading and dumping raw data...')
loadAndSaveRawData(rawDataDir, arrayDir, 'NIR', isGrayScale=True, isTrainData=True)
loadAndSaveRawData(rawDataDir, arrayDir, 'RED', isGrayScale=True, isTrainData=True)
# Test
loadAndSaveRawData(rawDataDir, arrayDir, 'NIR', isGrayScale=True, isTrainData=False)
loadAndSaveRawData(rawDataDir, arrayDir, 'RED', isGrayScale=True, isTrainData=False)
# CHECKPOINT 2 - IMAGE REGISTRATION AND CORRUPTED IMAGE SET REMOVAL
if 2 in opt.ckpt:
# Load dataset
logging.info(f'Loading {band} dataset...')
TRAIN, TEST = loadData(arrayDir, band)
# Process the train dataset
logging.info(f'Processing {band} train dataset...')
allImgLR, allMskLR, allImgHR, allMskHR = TRAIN
allImgMskLR = registerImages(allImgLR, allMskLR) # np.ma.masked_array
allImgMskHR = convertToMaskedArray(allImgHR, allMskHR) # np.ma.masked_array
trmImgMskLR, trmImgMskHR = removeCorruptedTrainImageSets(
allImgMskLR, allImgMskHR, clarityThreshold=opt.clarityThresholdLR)
trmImgMskLR = pickClearLRImgsPerImgSet(
trmImgMskLR, numImgToPick=opt.numTopClearest, clarityThreshold=opt.clarityThresholdLR)
# Process the test dataset
logging.info(f'Processing {band} test dataset...')
allImgLRTest, allMskLRTest = TEST
allImgMskLRTest = registerImages(allImgLRTest, allMskLRTest) # np.ma.masked_array
trmImgMskLRTest = removeCorruptedTestImageSets(allImgMskLRTest, clarityThreshold=opt.clarityThresholdLR)
trmImgMskLRTest = pickClearLRImgsPerImgSet(
trmImgMskLRTest, numImgToPick=opt.numTopClearest, clarityThreshold=opt.clarityThresholdLR)
logging.info(f'Saving {band} trimmed dataset...')
if not os.path.exists(trimmedArrayDir):
os.makedirs(trimmedArrayDir)
trmImgMskLR.dump(os.path.join(trimmedArrayDir, f'TRAINimgLR_{band}.npy'))
trmImgMskHR.dump(os.path.join(trimmedArrayDir, f'TRAINimgHR_{band}.npy'))
trmImgMskLRTest.dump(os.path.join(trimmedArrayDir, f'TESTimgLR_{band}.npy'))
# CHECKPOINT 3 - PATCH GENERATION
if 3 in opt.ckpt:
# Generate patches
logging.info(f'Loading TEST {band} LR patch dataset...')
trmImgMskLRTest = np.load(os.path.join(trimmedArrayDir, f'TESTimgLR_{band}.npy'), allow_pickle=True)
logging.info(f'Generating TEST {band} LR Patches...')
numImgSet, numImgPerImgSet, C, _, _ = trmImgMskLRTest.shape
# padding with size of Loss Crop cropBorder
if opt.toPad:
paddings = [[0, 0], [0, 0], [0, 0], [LOSS_CROP_BORDER,
LOSS_CROP_BORDER], [LOSS_CROP_BORDER, LOSS_CROP_BORDER]]
trmImgLRTest = np.pad(trmImgMskLRTest, paddings, 'reflect')
trmMskLRTest = np.pad(trmImgMskLRTest.mask, paddings, 'reflect')
trmImgMskLRTest = np.ma.masked_array(trmImgLRTest, mask=trmMskLRTest)
MAX_SHIFT = 2 * LOSS_CROP_BORDER
else:
MAX_SHIFT = 0
patchesLR = generatePatches(trmImgMskLRTest, patchSize=opt.patchSizeLR + MAX_SHIFT, stride=opt.patchSizeLR)
patchesLR = patchesLR.reshape((numImgSet, -1, numImgPerImgSet, C,
opt.patchSizeLR + MAX_SHIFT, opt.patchSizeLR + MAX_SHIFT))
logging.info(f'Saving {band} LR Patches...')
patchesLR.dump(os.path.join(patchesDir, f'TESTpatchesLR_{band}.npy'), protocol=4)
del trmImgMskLRTest
del patchesLR
gc.collect()
logging.info(f'Loading TRAIN {band} LR patch dataset...')
trmImgMskLR = np.load(os.path.join(trimmedArrayDir, f'TRAINimgLR_{band}.npy'), allow_pickle=True)
logging.info(f'Generating TRAIN {band} LR Patches...')
numImgSet, numImgPerImgSet, C, H, W = trmImgMskLR.shape
if opt.toPad:
paddings = [[0, 0], [0, 0], [0, 0], [LOSS_CROP_BORDER,
LOSS_CROP_BORDER], [LOSS_CROP_BORDER, LOSS_CROP_BORDER]]
trmImgLR = np.pad(trmImgMskLR, paddings, 'reflect')
trmMskLR = np.pad(trmImgMskLR.mask, paddings, 'reflect')
trmImgMskLR = np.ma.masked_array(trmImgLR, mask=trmMskLR)
MAX_SHIFT = 2 * LOSS_CROP_BORDER
else:
MAX_SHIFT = 0
patchesLR = generatePatches(trmImgMskLR, patchSize=opt.patchSizeLR + MAX_SHIFT, stride=opt.patchStrideLR)
patchesLR = patchesLR.reshape((numImgSet, -1, numImgPerImgSet, C,
opt.patchSizeLR + MAX_SHIFT, opt.patchSizeLR + MAX_SHIFT))
logging.info(f'Saving {band} LR Patches...')
patchesLR.dump(os.path.join(patchesDir, f'TRAINpatchesLR_{band}.npy'), protocol=4)
del trmImgMskLR
del patchesLR
gc.collect()
logging.info(f'Loading TRAIN {band} HR patch dataset...')
trmImgMskHR = np.load(os.path.join(trimmedArrayDir, f'TRAINimgHR_{band}.npy'), allow_pickle=True)
logging.info(f'Generating TRAIN {band} HR Patches...')
numImgSet, numImgPerImgSet, C, Hhr, Whr = trmImgMskHR.shape
# Compute upsampleScale
upsampleScale = Hhr // H
patchesHR = generatePatches(trmImgMskHR, patchSize=opt.patchSizeLR *
upsampleScale, stride=opt.patchStrideLR * upsampleScale)
patchesHR = patchesHR.reshape((numImgSet, -1, numImgPerImgSet, C, opt.patchSizeLR *
upsampleScale, opt.patchSizeLR * upsampleScale))
logging.info(f'Saving {band} HR Patches...')
patchesHR.dump(os.path.join(patchesDir, f'TRAINpatchesHR_{band}.npy'), protocol=4)
del trmImgMskHR
del patchesHR
gc.collect()
# CHECKPOINT 4 - CLEANING PATCHES
if 4 in opt.ckpt:
logging.info(f'Loading {band} train LR Patches...')
patchesLR = np.load(os.path.join(patchesDir, f'TRAINpatchesLR_{band}.npy'), allow_pickle=True)
logging.info(f'Loading {band} train HR Patches...')
patchesHR = np.load(os.path.join(patchesDir, f'TRAINpatchesHR_{band}.npy'), allow_pickle=True)
logging.info(f'Remove corrupted train {band} Patch sets...')
trmPatchesLR, trmPatchesHR = removeCorruptedTrainPatchSets(
patchesLR, patchesHR, clarityThreshold=opt.clarityThresholdHR)
logging.info(f'Deleting {band} train HR patches that has below {opt.clarityThresholdHR} clarity...')
trmPatchesLR, trmPatchesHR = pickClearPatches(
trmPatchesLR, trmPatchesHR, clarityThreshold=opt.clarityThresholdHR)
# Reshape to [N, C, D, H, W] for PyTorch training
logging.info(f'Reshaping {band} train patches...')
trmPatchesLR = trmPatchesLR.transpose((0, 3, 4, 1, 2)) # shape is (numImgSet, H, W, numLRImg, C)
trmPatchesHR = trmPatchesHR.transpose((0, 3, 4, 1, 2))
trmPatchesHR = trmPatchesHR.squeeze(4) # (numImgSet, H, W, C)
logging.info(f'Saving {band} train patches...')
trmPatchesLR.dump(os.path.join(trimmedPatchesDir, f'TRAINpatchesLR_{band}.npy'), protocol=4)
trmPatchesHR.dump(os.path.join(trimmedPatchesDir, f'TRAINpatchesHR_{band}.npy'), protocol=4)
# CHECKPOINT 5 - AUGMENTING PATCHES
if 5 in opt.ckpt:
logging.info(f'Loading {band} train LR Patches...')
augmentedPatchesLR = np.load(os.path.join(trimmedPatchesDir, f'TRAINpatchesLR_{band}.npy'), allow_pickle=True)
        logging.info(f'Augmenting by permuting {band} train LR Patches... Input: {augmentedPatchesLR.shape}')
augmentedPatchesLR = augmentByShufflingLRImgs(augmentedPatchesLR, numPermute=opt.numPermute)
logging.info(f'Augmenting by flipping {band} train LR Patches... Input: {augmentedPatchesLR.shape}')
#augmentedPatchesLR = augmentByFlipping(augmentedPatchesLR)
logging.info(f'Saving {band} train LR Patches... Final shape: {augmentedPatchesLR.shape}')
augmentedPatchesLR.dump(os.path.join(augmentedPatchesDir, f'TRAINpatchesLR_{band}.npy'), protocol=4)
del augmentedPatchesLR
gc.collect()
logging.info(f'Loading {band} train HR Patches...')
augmentedPatchesHR = np.load(os.path.join(trimmedPatchesDir, f'TRAINpatchesHR_{band}.npy'), allow_pickle=True)
logging.info(f'Augmenting by permuting {band} train HR Patches... Input: {augmentedPatchesHR.shape}')
augmentedPatchesHR = np.tile(augmentedPatchesHR, (opt.numPermute + 1, 1, 1, 1))
logging.info(f'Augmenting by flipping {band} train HR Patches... Input: {augmentedPatchesHR.shape}')
#augmentedPatchesHR = augmentByFlipping(augmentedPatchesHR)
logging.info(f'Saving {band} train HR Patches... Final shape: {augmentedPatchesHR.shape}')
augmentedPatchesHR.dump(os.path.join(augmentedPatchesDir, f'TRAINpatchesHR_{band}.npy'), protocol=4)
del augmentedPatchesHR
gc.collect()
def augmentByRICAP():
pass
def augmentByShufflingLRImgs(patchLR: np.ma.masked_array, numPermute=9):
# shape is (numImgSet, H, W, numLRImg, C)
# (numImgSet, H, W, C)
if numPermute == 0:
return patchLR
numImgSet, H, W, numLRImg, C = patchLR.shape
cacheLR = [patchLR]
for _ in range(numPermute):
idx = np.random.permutation(np.arange(numLRImg))
shuffled = patchLR[:, :, :, idx, :]
cacheLR.append(shuffled)
patchLR = np.concatenate(cacheLR)
return patchLR
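# Quick shape check (illustrative, not part of the original pipeline): each
# permutation pass stacks one extra shuffled copy of the image sets, so a
# (100, 16, 16, 9, 1) LR patch array grows to (300, 16, 16, 9, 1) for numPermute=2.
def _example_shuffle_augmentation():
    patches = np.ma.masked_array(np.zeros((100, 16, 16, 9, 1)))
    print(augmentByShufflingLRImgs(patches, numPermute=2).shape)   # (300, 16, 16, 9, 1)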
def augmentByFlipping(patches: np.ma.masked_array):
img90 = np.rot90(patches, k=1, axes=(1, 2))
img180 = np.rot90(patches, k=2, axes=(1, 2))
img270 = np.rot90(patches, k=3, axes=(1, 2))
imgFlipV = np.flip(patches, axis=(1))
imgFlipH = np.flip(patches, axis=(2))
imgFlipVH = np.flip(patches, axis=(1, 2))
allImgMsk = np.concatenate((patches, img90, img180, img270, imgFlipV, imgFlipH, imgFlipVH))
return allImgMsk
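# Quick shape check (illustrative, not part of the original pipeline): the
# flip/rotate augmentation returns the original patches plus three rotations
# and three flips, i.e. seven times as many patches along the first axis.
def _example_flip_augmentation():
    patches = np.ma.masked_array(np.zeros((10, 32, 32, 1)))
    print(augmentByFlipping(patches).shape)   # (70, 32, 32, 1)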
def pickClearPatches(patchesLR: np.ma.masked_array,
patchesHR: np.ma.masked_array,
clarityThreshold: float) -> List[np.ma.masked_array]:
'''
Input:
patchesLR: np.ma.masked_array[numImgSet, numPatches, numLowResImg, C, H, W]
patchesHR: np.ma.masked_array[numImgSet, numPatches, 1, C, H, W]
clarityThreshold: float
Output:
cleanPatchesLR: np.ma.masked_array[numImgSet*newNumPatches, numLowResImg, C, H, W]
cleanPatchesHR: np.ma.masked_array[numImgSet*newNumPatches, 1, C, H, W]
where newNumPatches <= numPatches
'''
desc = '[ INFO ] Cleaning train patches '
numImgSet, numPatches, numLowResImg, C, HLR, WLR = patchesLR.shape
reshapeLR = patchesLR.reshape((-1, numLowResImg, C, HLR, WLR))
_, _, numHighResImg, C, HHR, WHR = patchesHR.shape
reshapeHR = patchesHR.reshape((-1, numHighResImg, C, HHR, WHR))
booleanMask = np.array([isPatchNotCorrupted(patch, clarityThreshold)
for patch in tqdm(reshapeHR, desc=desc)])
trimmedPatchesLR = reshapeLR[booleanMask]
    trimmedPatchesHR = reshapeHR[booleanMask]
    return (trimmedPatchesLR, trimmedPatchesHR)
def pickClearPatchesV2(patchesLR: np.ma.masked_array,
patchesHR: np.ma.masked_array,
clarityThreshold: float) -> np.array:
'''
Input:
patchesLR: np.ma.masked_array[numImgSet, numPatches, numLowResImg, C, H, W]
patchesHR: np.ma.masked_array[numImgSet, numPatches, 1, C, H, W]
clarityThreshold: float
Output:
cleanPatchesLR: np.ma.masked_array[numImgSet, newNumPatches, numLowResImg, C, H, W]
cleanPatchesHR: np.ma.masked_array[numImgSet, newNumPatches, 1, C, H, W]
where newNumPatches <= numPatches
'''
desc = '[ INFO ] Cleaning train patches '
trmPatchesLR, trmPatchesHR = [], []
for patchSetLR, patchSetHR in tqdm(zip(patchesLR, patchesHR), desc=desc, total=len(patchesLR)):
trmPatchSetLR, trmPatchSetHR = explorePatchSet(patchSetLR, patchSetHR, clarityThreshold)
trmPatchesLR.append(trmPatchSetLR)
trmPatchesHR.append(trmPatchSetHR)
return (np.array(trmPatchesLR), np.array(trmPatchesHR))
def explorePatchSet(patchSetLR: np.ma.masked_array,
patchSetHR: np.ma.masked_array,
clarityThreshold: float) -> List[np.ma.array]:
'''
Explores a patch set and removes patches that do not have enough clarity.
Input:
patchSetLR: np.ma.masked_array[numPatches, numLowResImg, C, H, W],
patchSetHR: np.ma.masked_array[numPatches, 1, C, H, W],
clarityThreshold: float
'''
booleanMask = np.array([isPatchNotCorrupted(patch, clarityThreshold)
for patch in patchSetHR])
trmPatchSetLR = patchSetLR[booleanMask]
trmPatchSetHR = patchSetHR[booleanMask]
return trmPatchSetLR, trmPatchSetHR
def isPatchNotCorrupted(patch: np.ma.masked_array, clarityThreshold: float) -> bool:
'''
Determine if an HR patch passes the threshold
Input:
patchSet: np.ma.masked_array[1, C, H, W]
clarityThreshold: float
Output:
boolean that answers the question is Patch good enough?
'''
# totalPixels = imgSet.shape[2] * imgSet.shape[3] # width * height
isPatchClearEnough = np.count_nonzero(patch.mask)/(patch.shape[2] * patch.shape[3]) < (1-clarityThreshold)
return isPatchClearEnough
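# Worked example (illustrative, not part of the original pipeline): a 16x16 HR
# patch with 20 obscured pixels is 20/256 ~ 7.8% masked, which is below the
# allowed 1 - 0.85 = 15%, so the patch passes a 0.85 clarity threshold.
def _example_patch_clarity():
    data = np.zeros((1, 1, 16, 16))
    mask = np.zeros((1, 1, 16, 16), dtype=bool)
    mask.flat[:20] = True                              # 20 obscured pixels out of 256
    patch = np.ma.masked_array(data, mask=mask)
    print(isPatchNotCorrupted(patch, clarityThreshold=0.85))   # True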
def removeCorruptedTrainPatchSets(patchesLR: np.ma.masked_array,
patchesHR: np.ma.masked_array,
clarityThreshold: float) -> List[np.ma.masked_array]:
'''
Input:
patchesLR: np.ma.masked_array[numImgSet, numPatches, numLowResImg, C, H, W]
patchesHR: np.ma.masked_array[numImgSet, numPatches, 1, C, H, W]
clarityThreshold: float
Output:
cleanPatchesLR: np.ma.masked_array[numImgSet, newNumPatches, numLowResImg, C, H, W]
cleanPatchesHR: np.ma.masked_array[numImgSet, newNumPatches, 1, C, H, W]
where newNumPatches <= numPatches
'''
# '[ INFO ] Loading LR masks and dumping '
desc = '[ INFO ] Removing corrupted train sets '
booleanMask = np.array([isPatchSetNotCorrupted(patchSet, clarityThreshold)
for patchSet in tqdm(patchesHR, desc=desc)])
trimmedPatchesLR = patchesLR[booleanMask]
    trimmedPatchesHR = patchesHR[booleanMask]
    return (trimmedPatchesLR, trimmedPatchesHR)
def removeCorruptedTestPatchSets(patchesLR: np.ma.masked_array,
clarityThreshold: float) -> np.ma.masked_array:
'''
Input:
patchesLR: np.ma.masked_array[numImgSet, numPatches, numLowResImg, C, H, W]
clarityThreshold: float
Output:
cleanPatchesLR: np.ma.masked_array[numImgSet, newNumPatches, numLowResImg, C, H, W]
where newNumPatches <= numPatches
'''
desc = '[ INFO ] Removing corrupted test sets '
booleanMask = np.array([isPatchSetNotCorrupted(patchSet, clarityThreshold)
                            for patchSet in tqdm(patchesLR, desc=desc)])
trimmedPatchesLR = patchesLR[booleanMask]
return trimmedPatchesLR
def isPatchSetNotCorrupted(patchSet: np.ma.masked_array, clarityThreshold: float) -> bool:
'''
Determine if all the LR images are not clear enough.
Return False if ALL LR image clarity is below threshold.
Input:
patchSet: np.ma.masked_array[numPatches, numLowResImg, C, H, W]
clarityThreshold: float
Output:
boolean that answers the question is PatchSet not Corrupted?
'''
# totalPixels = imgSet.shape[2] * imgSet.shape[3] # width * height
isPatchClearEnough = np.array([np.count_nonzero(patch.mask)/(patch.shape[-1]*patch.shape[-2]) < (1-clarityThreshold)
for patch in patchSet])
return np.sum(isPatchClearEnough) != 0
def generatePatches(imgSets: np.ma.masked_array, patchSize: int, stride: int) -> np.ma.masked_array:
'''
Input:
images: np.ma.masked_array[numImgSet, numImgPerImgSet, channels, height, width]
patchSize: int
stride: int
Output:
np.ma.masked_array[numImgSet, numImgPerImgSet * numPatches, channels, patchSize, patchSize]
'''
desc = f'[ INFO ] Generating patches (k={patchSize}, s={stride})'
if imgSets.dtype != 'float32':
imgSets = imgSets.astype(np.float32)
return np.ma.array([generatePatchesPerImgSet(imgSet, patchSize, stride) for imgSet in tqdm(imgSets, desc=desc)])
def generatePatchesPerImgSet(images: np.ma.masked_array, patchSize: int, stride: int) -> np.ma.masked_array:
'''
Generate patches of images systematically.
Input:
images: np.ma.masked_array[numImgPerImgSet, channels, height, width]
patchSize: int
stride: int
Output:
np.ma.masked_array[numImgPerImgSet * numPatches, channels, patchSize, patchSize]
'''
tensorImg = torch.tensor(images)
tensorMsk = torch.tensor(images.mask)
numMskPerImgSet, channels, height, width = images.shape
patchesImg = tensorImg.unfold(0, numMskPerImgSet, numMskPerImgSet).unfold(
1, channels, channels).unfold(2, patchSize, stride).unfold(3, patchSize, stride)
patchesImg = patchesImg.reshape(-1, channels, patchSize, patchSize) # [numImgPerImgSet * numPatches, C, H, W]
patchesImg = patchesImg.numpy()
patchesMsk = tensorMsk.unfold(0, numMskPerImgSet, numMskPerImgSet).unfold(
2, patchSize, stride).unfold(3, patchSize, stride)
patchesMsk = patchesMsk.reshape(-1, channels, patchSize, patchSize)
patchesMsk = patchesMsk.numpy()
return np.ma.masked_array(patchesImg, mask=patchesMsk)
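# Quick shape check (illustrative, not part of the original pipeline): unfolding
# nine single-channel 128x128 LR frames with a 32-pixel patch size and stride
# gives (128 - 32) / 32 + 1 = 4 patches per axis, i.e. 9 * 16 = 144 patches.
def _example_patch_extraction():
    data = np.zeros((9, 1, 128, 128), dtype=np.float32)
    imgs = np.ma.masked_array(data, mask=np.zeros(data.shape, dtype=bool))
    print(generatePatchesPerImgSet(imgs, patchSize=32, stride=32).shape)   # (144, 1, 32, 32)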
def registerImages(allImgLR: np.ndarray, allMskLR: np.ndarray) -> np.ma.masked_array:
'''
For each imgset, align all its imgs into one coordinate system.
    The reference image will be the clearest one (i.e. the one with the highest accumulated QM sum).
Input:
allImgLR: np.ndarray[numImgSet, numImgPerImgSet, channel, height, width]
allMskLR: np.ndarray[numImgSet, numMskPerImgSet, channel, height, width]
Output:
output: np.ma.masked_array with the same dimension
'''
desc = '[ INFO ] Registering LR images '
return np.ma.array([registerImagesInSet(allImgLR[i], allMskLR[i])
for i in tqdm(range(allImgLR.shape[0]), desc=desc)])
def registerImagesInSet(imgLR: np.ndarray, mskLR: np.ndarray) -> np.ma.masked_array:
'''
Takes in an imgset LR masks and images.
Sorts it and picks a reference, then register.
Input:
imgLR: np.ndarray[numImgPerImgSet, channel, height, width]
mskLR: np.ndarray[numMskPerImgSet, channel, height, width]
Output
regImgMskLR: np.ma.masked_array[numMskPerImgSet, channel, height, width]
This array has a property mask where in if used, returns a boolean array
with the same dimension as the data.
https://docs.scipy.org/doc/numpy-1.15.0/reference/maskedarray.baseclass.html#numpy.ma.MaskedArray.data
'''
sortedIdx = np.argsort([-np.count_nonzero(msk) for msk in mskLR])
clearestToDirtiestImg = imgLR[sortedIdx]
clearestToDirtiestMsk = mskLR[sortedIdx]
referImg = clearestToDirtiestImg[0]
for i, (img, msk) in enumerate(zip(clearestToDirtiestImg, clearestToDirtiestMsk)):
if i == 0:
regImgMskLR = np.expand_dims(np.ma.masked_array(img, mask=~msk), axis=0)
else:
regImg, regMsk = registerFrame(img, msk.astype(bool), referImg)
mskdArray = np.expand_dims(np.ma.masked_array(regImg, mask=~(regMsk > 0)), axis=0)
regImgMskLR = np.ma.concatenate((regImgMskLR, mskdArray))
return regImgMskLR
def registerFrame(img: np.ndarray, msk: np.ndarray, referenceImg: np.ndarray, tech='freq') -> Tuple[np.ndarray, np.ndarray]:
'''
Input:
img: np.ndarray[channel, height, width]
msk: np.ndarray[channel, height, width]
referenceImg: np.ndarray[channel, height, width]
Output:
Tuple(regImg, regMsk)
regImg: np.ndarray[channel, height, width]
regMsk: np.ndarray[channel, height, width]
'''
if tech == 'time':
shiftArray = masked_register_translation(referenceImg, img, msk)
regImg = shift(img, shiftArray, mode='reflect')
regMsk = shift(msk, shiftArray, mode='constant', cval=0)
if tech == 'freq':
shiftArray, _, _ = register_translation(referenceImg, img)
regImg = fourier_shift(np.fft.fftn(img), shiftArray)
regImg = np.fft.ifftn(regImg)
regImg = regImg.real
regMsk = fourier_shift(np.fft.fftn(msk), shiftArray)
regMsk = np.fft.ifftn(regMsk)
regMsk = regMsk.real
return (regImg, regMsk)
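# Tiny sanity check (illustrative, not part of the original pipeline): a frame
# shifted by two pixels is registered back against its unshifted reference via
# the frequency-domain path; the toy arrays here are random stand-ins.
def _example_register_frame():
    rng = np.random.RandomState(0)
    reference = rng.rand(1, 64, 64)
    shifted = shift(reference, (0, 2, 2), mode='reflect')
    msk = np.ones_like(reference, dtype=bool)
    regImg, regMsk = registerFrame(shifted, msk, reference, tech='freq')
    print(regImg.shape, regMsk.shape)   # (1, 64, 64) (1, 64, 64)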
def convertToMaskedArray(imgSets: np.ndarray, mskSets: np.ndarray) -> np.ma.masked_array:
'''
Convert Image and Mask array pair to a masked array.
Especially made for HR images.
Input:
imgSets: np.ndarray[numImgSet, numImgPerImgSet, channel, height, width]
mskSets: np.ndarray[numImgSet, numImgPerImgSet, channel, height, width]
Output:
imgMskSets: np.ma.masked_array[numImgSet, numImgPerImgSet, channel, height, width]
'''
imgSets = np.squeeze(imgSets, axis=1) # [numImgSet, channel, height, width]
mskSets = np.squeeze(mskSets, axis=1) # [numImgSet, channel, height, width]
imgMskSets = np.ma.array([np.ma.masked_array(img, mask=~msk)
for img, msk in zip(imgSets, mskSets)]) # [numImgSet, channel, height, width]
imgMskSets = np.expand_dims(imgMskSets, axis=1) # [numImgSet, 1, channel, height, width]
return imgMskSets
def removeCorruptedTrainImageSets(imgMskLR: np.ma.masked_array, imgMskHR: np.ma.masked_array,
clarityThreshold: float) -> Tuple[np.ma.masked_array, np.ma.masked_array]:
'''
Remove imageset if ALL its LR frames is less than the given clarity threshold.
Input:
imgMskLR: np.ma.masked_array[numImgSet, numImgPerImgSet, channel, height, width]
imgMskHR: np.ma.masked_array[numImgSet, 1, channel, height, width]
clarityThreshold: float
Output:
trimmedImgMskLR: np.ma.masked_array[newNumImgSet, numImgPerImgSet, channel, height, width]
trimmedImgMskHR: np.ma.masked_array[newNumImgSet, 1, channel, height, width]
where newNumImgSet <= numImgSet
'''
desc = '[ INFO ] Removing corrupted ImageSets '
booleanMask = np.array([isImageSetNotCorrupted(imgSet, clarityThreshold) for imgSet in tqdm(imgMskLR, desc=desc)])
trimmedImgMskLR = imgMskLR[booleanMask]
trimmedImgMskHR = imgMskHR[booleanMask]
return (trimmedImgMskLR, trimmedImgMskHR)
def removeCorruptedTestImageSets(imgMskLR: np.ma.masked_array,
clarityThreshold: float) -> np.ma.masked_array:
'''
Remove imageset if ALL its LR frames is less than the given clarity threshold.
Input:
imgMskLR: np.ma.masked_array[numImgSet, numImgPerImgSet, channel, height, width]
clarityThreshold: float
Output:
trimmedImgMskLR: np.ma.masked_array[newNumImgSet, numImgPerImgSet, channel, height, width]
where newNumImgSet <= numImgSet
'''
desc = '[ INFO ] Removing corrupted ImageSets '
booleanMask = np.array([isImageSetNotCorrupted(imgSet, clarityThreshold)
for imgSet in tqdm(imgMskLR, desc=desc)])
trimmedImgMskLR = imgMskLR[booleanMask]
return trimmedImgMskLR
def isImageSetNotCorrupted(imgSet: np.ma.masked_array, clarityThreshold: float) -> bool:
'''
Determine if all the LR images are not clear enough.
Return False if ALL LR image clarity is below threshold.
Input:
imgSet: np.ma.masked_array[numImgPerImgSet, channel, height, width]
clarityThreshold: float
Output:
boolean that answers the question is ImageSet not Corrupted?
'''
# totalPixels = imgSet.shape[2] * imgSet.shape[3] # width * height
isImageClearEnough = np.array([np.count_nonzero(img.mask)/(img.shape[1] * img.shape[2]) < (1-clarityThreshold)
for img in imgSet])
return np.sum(isImageClearEnough) != 0
def pickClearLRImgsPerImgSet(imgMskLR: np.ma.masked_array,
numImgToPick: int, clarityThreshold: float) -> np.ma.masked_array:
'''
Pick clearest frames per ImgSet.
Before picking, we remove all frames that don't satisfy the clarity threshold.
After removing the said frames, in the event that the remaining LR frames is less than
the number of img to pick, we randomly pick among the clear frames to satisfy number of frames.
(This might be a form of regularization...)
Input:
imgMskLR: np.ma.masked_array[newNumImgSet, numImgPerImgSet, channel, height, width]
numImgToPick: int
Output:
trimmedImgMskLR: np.ma.masked_array[newNumImgSet, numImgToPick, channel, height, width]
where numImgToPick <= numImgPerImgSet
'''
desc = f'[ INFO ] Picking top {numImgToPick} clearest images '
return np.ma.array([pickClearImg(filterImgMskSet(imgMsk, clarityThreshold), numImgToPick=numImgToPick)
for imgMsk in tqdm(imgMskLR, desc=desc)])
def pickClearImg(imgMsk: np.ma.masked_array, numImgToPick: int) -> np.ma.masked_array:
'''
Pick clearest low resolution images!
Input:
imgMsk: np.ma.masked_array[numImgPerImgSet, channel, height, width]
numImgToPick: int
Ouput:
trimmedImgMsk: np.ma.masked_array[newNumImgPerImgSet, channel, height, width]
where newNumImgPerImgSet <= numImgPerImgSet might not hold.
'''
    # sort ascending by number of masked (obscured) pixels so the clearest frames come first
    sortedIndices = np.argsort(np.sum(imgMsk.mask, axis=(1, 2, 3)))
sortedImgMskArray = imgMsk[sortedIndices]
if numImgToPick < len(imgMsk):
trimmedImgMsk = sortedImgMskArray[:numImgToPick]
else:
trimmedImgMsk = np.copy(sortedImgMskArray)
while len(trimmedImgMsk) < numImgToPick:
print('Short on data!')
shuffledIndices = np.random.choice(sortedIndices, size=len(sortedIndices), replace=False)
toAppend = imgMsk[shuffledIndices]
trimmedImgMsk = np.ma.concatenate((trimmedImgMsk, toAppend))
trimmedImgMsk = trimmedImgMsk[:numImgToPick]
return trimmedImgMsk
def filterImgMskSet(imgSet: np.ma.masked_array, clarityThreshold: float) -> np.ma.masked_array:
'''
    This function applies the same clarity test as isImageSetNotCorrupted,
    except that the output is the filtered (clear-enough) subset of the input array.
Input:
imgSet: np.ma.masked_array[numImgPerImgSet, channel, height, width]
clarityThreshold: float
Output:
filteredImgSet: np.ma.masked_array[newNumImgPerImgSet, channel, height, width]
where newNumImgPerImgSet <= numImgPerImgSet
'''
# totalPixels = imgSet.shape[2] * imgSet.shape[3] # width * height
isImageClearEnough = np.array([np.count_nonzero(img.mask)/(img.shape[1] * img.shape[2]) < (1-clarityThreshold)
for img in imgSet]) # boolean mask
filteredImgSet = imgSet[isImageClearEnough]
return filteredImgSet
def loadData(arrayDir: str, band: str):
'''
Input:
arrayDir: str -> the path folder for which you saved .npy files
band: str -> 'NIR' or 'RED'
isTrainData: bool -> set to true if dealing with the train dataset
Output:
List[Tuple(train data), Tuple(test data)]
'''
# Check input dir validity
if not os.path.exists(arrayDir):
raise Exception("[ ERROR ] Folder path does not exists...")
if not os.listdir(arrayDir):
raise Exception("[ ERROR ] No files in the provided directory...")
TRAINimgLR = np.load(os.path.join(arrayDir, f'TRAINimgLR_{band}.npy'), allow_pickle=True)
TRAINimgHR = np.load(os.path.join(arrayDir, f'TRAINimgHR_{band}.npy'), allow_pickle=True)
TRAINmskLR = np.load(os.path.join(arrayDir, f'TRAINmskLR_{band}.npy'), allow_pickle=True)
TRAINmskHR = np.load(os.path.join(arrayDir, f'TRAINmskHR_{band}.npy'), allow_pickle=True)
TESTimgLR = np.load(os.path.join(arrayDir, f'TESTimgLR_{band}.npy'), allow_pickle=True)
TESTmskLR = np.load(os.path.join(arrayDir, f'TESTmskLR_{band}.npy'), allow_pickle=True)
TRAIN = (TRAINimgLR, TRAINmskLR, TRAINimgHR, TRAINmskHR)
TEST = (TESTimgLR, TESTmskLR)
return TRAIN, TEST
def loadAndSaveRawData(rawDataDir: str, arrayDir: str, band: str, isGrayScale=True, isTrainData=True):
'''
This function loads every imageset and dumps it into one giant array.
We do this because of memory constraints...
If you have about 64 GB of ram and about 72~128GB of swap space,
    you might opt not to use this function.
Input:
rawDataDir: str -> downloaded raw data directory
arrayDir: str -> the directory to which you will dump the numpy array
band: str -> 'NIR' or 'RED'
isGrayScale: bool -> Set to true if you are dealing with grayscale image
Output:
Array file with dimensions
[numImgSet, numImgPerImgSet, channel, height, width]
'''
# Check if arrayDir exist, build if not.
if not os.path.exists(arrayDir):
os.makedirs(arrayDir)
# Is train data?
key = 'TRAIN' if isTrainData else 'TEST'
# Get directories (OS agnostic)
trainDir = os.path.join(rawDataDir, key.lower(), band)
dirList = sorted(glob.glob(os.path.join(trainDir, 'imgset*')))
# Load all low resolution images in a massive array and dump!
# The resulting numpy array has dimensions [numImgSet, numLowResImgPerImgSet, channel, height, width]
descForImgLR = '[ INFO ] Loading LR images and dumping '
imgLR = np.array([np.array([io.imread(fName).transpose((2, 0, 1)) if not isGrayScale
else np.expand_dims(io.imread(fName), axis=0)
for fName in sorted(glob.glob(os.path.join(dirName, 'LR*.png')))])
for dirName in tqdm(dirList, desc=descForImgLR)])
imgLR.dump(os.path.join(arrayDir, f'{key}imgLR_{band}.npy'))
# Load all low resolution masks in a massive array and dump!
# The resulting numpy array has dimensions [numImgSet, numLowResMaskPerImgSet, channel, height, width]
descForMaskLR = '[ INFO ] Loading LR masks and dumping '
mskLR = np.array([np.array([io.imread(fName).transpose((2, 0, 1)) if not isGrayScale
else np.expand_dims(io.imread(fName), axis=0)
for fName in sorted(glob.glob(os.path.join(dirName, 'QM*.png')))])
for dirName in tqdm(dirList, desc=descForMaskLR)])
mskLR.dump(os.path.join(arrayDir, f'{key}mskLR_{band}.npy'))
if isTrainData:
# Load all high resolution images in a massive array and dump!
# The resulting numpy array has dimensions [numImgSet, 1, channel, height, width]
descForImgHR = '[ INFO ] Loading HR images and dumping '
imgHR = np.array([io.imread(os.path.join(dirName, 'HR.png')).transpose((2, 0, 1)) if not isGrayScale
else np.expand_dims(io.imread(os.path.join(dirName, 'HR.png')), axis=0)
for dirName in tqdm(dirList, desc=descForImgHR)])
# For count of HR pics which is 1.
imgHR = np.expand_dims(imgHR, axis=1)
imgHR.dump(os.path.join(arrayDir, f'{key}imgHR_{band}.npy'))
# Load all high resolution images in a massive array and dump!
# The resulting numpy array has dimensions [numImgSet, 1, channel, height, width]
descForMaskHR = '[ INFO ] Loading HR masks and dumping '
mskHR = np.array([io.imread(os.path.join(dirName, 'SM.png')).transpose((2, 0, 1)) if not isGrayScale
else np.expand_dims(io.imread(os.path.join(dirName, 'SM.png')), axis=0)
for dirName in tqdm(dirList, desc=descForMaskHR)])
# For count of HR pics which is 1.
mskHR = np.expand_dims(mskHR, axis=1)
mskHR.dump(os.path.join(arrayDir, f'{key}mskHR_{band}.npy'))
def saveArrays(inputDictionary: Dict, parentDir: str, band: str):
'''
Saves numpy arrays per imageset.
    This method serves as an intermediate checkpoint for low-memory users.
inputDictionary = { 'imgLRSetsUpscaled': [],
'imgLRSets': [],
'imgHRSets': [],
'maskLRSetsUpscaled': [],
'maskLRSets': [],
'maskHRSets': [],
'names': []}
'''
# Define directory
dirToSave = os.path.join(parentDir, 'numpyArrays', band)
if not os.path.exists(dirToSave):
os.mkdir(dirToSave)
    # Iterate through the imageset arrays and save them
numSets = len(inputDictionary['names'])
for i in tqdm(range(numSets), desc='[ INFO ] Saving numpy arrays '):
np.save(os.path.join(dirToSave,
'imgLRSetsUpscaled_{}.npy'.format(inputDictionary['names'][i])),
inputDictionary['imgLRSetsUpscaled'][i], allow_pickle=True)
np.save(os.path.join(dirToSave,
'imgLRSets_{}.npy'.format(inputDictionary['names'][i])),
inputDictionary['imgLRSets'][i], allow_pickle=True)
np.save(os.path.join(dirToSave,
'imgHRSets_{}.npy'.format(inputDictionary['names'][i])),
inputDictionary['imgHRSets'][i], allow_pickle=True)
np.save(os.path.join(dirToSave,
'maskLRSetsUpscaled_{}.npy'.format(inputDictionary['names'][i])),
inputDictionary['maskLRSetsUpscaled'][i], allow_pickle=True)
np.save(os.path.join(dirToSave,
'maskLRSets_{}.npy'.format(inputDictionary['names'][i])),
inputDictionary['maskLRSets'][i], allow_pickle=True)
np.save(os.path.join(dirToSave,
'maskHRSets_{}.npy'.format(inputDictionary['names'][i])),
inputDictionary['maskHRSets'][i], allow_pickle=True)
def imageSetToNumpyArrayHelper(imageSetsUpscaled: Dict, imageSets: Dict, isGrayScale: bool, isNHWC: bool):
'''
    Helper function for the imageSetToNumpyArray function.
    Iterates through all the elements in the dictionary and applies imageSetToNumpyArray to each.
'''
# Initialize Output dictionary
output = {'imgLRSetsUpscaled': [],
'imgLRSets': [],
'imgHRSets': [],
'maskLRSetsUpscaled': [],
'maskLRSets': [],
'maskHRSets': [],
'names': []}
names = []
for name in tqdm(imageSets.keys(), desc='[ INFO ] Converting imageSets into numpy arrays'):
currSetUpscaled = imageSetsUpscaled[name]
ioImgPairUpscaled, ioMaskPairUpscaled = imageSetToNumpyArray(imageSet=currSetUpscaled,
isGrayScale=isGrayScale, isNWHC=isNHWC)
lrImgUpscaled, hrImg = ioImgPairUpscaled
lrMaskUpscaled, hrMask = ioMaskPairUpscaled
currSet = imageSets[name]
ioImgPair, ioMaskPair = imageSetToNumpyArray(imageSet=currSet,
isGrayScale=isGrayScale, isNWHC=isNHWC)
        lrImg, _ = ioImgPair
        lrMask, _ = ioMaskPair
output['imgLRSetsUpscaled'].append(lrImgUpscaled)
output['maskLRSetsUpscaled'].append(lrMaskUpscaled)
output['imgLRSets'].append(lrImg)
output['imgHRSets'].append(hrImg)
output['maskLRSets'].append(lrMask)
output['maskHRSets'].append(hrMask)
output['names'].append(name)
return output
def generatePatchDatasetFromSavedFile(srcFolder: str, dstFolder: str, names: List[str], useUpsample: bool,
patchSize: int, thresholdPatchesPerImgSet: int, thresholdClarityLR: float,
thresholdClarityHR: float):
'''
Sample patches from the low res image.
    Patches are kept only if at least n% of their pixels are clear.
Input:
inputDictionary: Dict -> a dictionary containing the LR images and its upscaled versions, the HR images,
upsampleScale, names, shifts, and respective masks.
patchSize: int -> size of patch to sample
thresholdPatchesPerImgSet: int
Output:
patchesPerImgSet: Dict
'''
# Safety checks
if not os.path.exists(dstFolder):
os.mkdir(dstFolder)
    # Set maximum number of trials to get viable patches
MAX_TRIAL = 100000
PATCH_PER_SET = 9
# Initialize outputDict
outputDict = {}
# Do we use the upsampled images?
isUpsample = ''
scale = 3
if useUpsample:
isUpsample = 'Upscaled'
scale = 1
# Extract constants
numSets = len(names)
sampleFname = os.path.join(srcFolder, 'imgLRSets{}_{}.npy'.format(isUpsample, names[0]))
sampleArray = np.load(sampleFname)
shapeUpscaled = list(sampleArray[0][0].shape)[1:]
totalNumPixInPatch = patchSize * patchSize
# Iterate thru all sets
for i in tqdm(range(numSets), desc='[ INFO ] Finding patches '):
# Extract relevant arrays from the inputDictionary
currImgSetLR = loadAndRemove(os.path.join(srcFolder, 'imgLRSets{}_{}.npy'.format(isUpsample, names[i])))
currMaskSetLR = loadAndRemove(os.path.join(srcFolder, 'maskLRSets{}_{}.npy'.format(isUpsample, names[i])))
currImgSetHR = np.load(os.path.join(srcFolder, 'imgHRSets_{}.npy'.format(names[i])))
currMaskSetHR = np.load(os.path.join(srcFolder, 'maskHRSets_{}.npy'.format(names[i])))
# Initialize accumulators
currTrial = 0
currNumPatches = 0
coordinatesForTheSet = []
imgLRPatches, imgHRPatches = [], []
maskLRPatches, maskHRPatches = [], []
coordinates = []
shiftsPatch = []
# Trials to SUCCESS
while True:
# Define stopping condition: MAX_TRIAL is exceeded or thresholdPatchesPerImgSet is satisfied
if currNumPatches >= thresholdPatchesPerImgSet or currTrial >= MAX_TRIAL:
if imgLRPatches:
np.save(os.path.join(dstFolder, 'imgLRPatches_{}.npy'.format(names[i])),
np.stack(imgLRPatches), allow_pickle=True)
np.save(os.path.join(dstFolder, 'imgHRPatches_{}.npy'.format(names[i])),
np.stack(imgHRPatches), allow_pickle=True)
np.save(os.path.join(dstFolder, 'maskLRPatches_{}.npy'.format(names[i])),
np.stack(maskLRPatches), allow_pickle=True)
np.save(os.path.join(dstFolder, 'maskHRPatches_{}.npy'.format(names[i])),
np.stack(maskHRPatches), allow_pickle=True)
np.save(os.path.join(dstFolder, 'shifts_{}.npy'.format(names[i])),
np.stack(shiftsPatch), allow_pickle=True)
break
            # Sample top-left and bottom-right coordinates for a patch
topLeft, btmRight = sampleCoordinates(imgSize=shapeUpscaled, patchSize=[patchSize, patchSize])
xZero, yZero = topLeft
xOne, yOne = btmRight
# Extract patches using the sampled coordinates
patchImgLR = currImgSetLR[:, :, yZero: yOne, xZero: xOne] # [numSamples, channels, height, width]
patchImgHR = currImgSetHR[:, :, yZero*scale: yOne*scale, xZero*scale: xOne*scale]
patchMaskLR = currMaskSetLR[:, :, yZero: yOne, xZero: xOne] > 0 # [numSamples, channels, height, width]
patchMaskHR = currMaskSetHR[:, :, yZero*scale: yOne*scale, xZero*scale: xOne*scale] > 0
            # Check clarity of the low resolution patches
clearPercentageArrayLR = np.sum(patchMaskLR, axis=(1, 2, 3)) / totalNumPixInPatch
isSampleClearLR = clearPercentageArrayLR > thresholdClarityLR
isSampleGoodLR = np.sum(isSampleClearLR) > PATCH_PER_SET
clearPercentageArrayHR = np.sum(patchMaskHR, axis=(1, 2, 3)) / totalNumPixInPatch
isSampleClearHR = clearPercentageArrayHR > thresholdClarityHR
isSampleGoodHR = np.sum(isSampleClearHR)
if isSampleGoodLR and isSampleGoodHR:
imgLRPatches.append(patchImgLR)
imgHRPatches.append(patchImgHR)
maskLRPatches.append(patchMaskLR)
maskHRPatches.append(patchMaskHR)
coordinatesForTheSet.append((topLeft, btmRight))
shiftsPatch.append(shift[i])
currNumPatches += 1
currTrial += 1
outputDict[names[i]] = coordinatesForTheSet
return outputDict
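# Usage sketch (paths and thresholds below are illustrative assumptions, not values from
# the original experiments):
# coords = generatePatchDatasetFromSavedFile(srcFolder='arrays/NIR', dstFolder='patches/NIR',
#                                            names=names, useUpsample=False, patchSize=32,
#                                            thresholdPatchesPerImgSet=20,
#                                            thresholdClarityLR=0.85, thresholdClarityHR=0.85)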
def loadAndRemove(filePath):
    loadedFile = np.load(filePath, allow_pickle=True)
    os.remove(filePath)
    return loadedFile
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from typing import Union, Dict, List, Optional, Tuple
import numpy as np
import plotly.graph_objs as go
from ax.core.experiment import Experiment
from ax.core.objective import MultiObjective
from ax.core.optimization_config import (
MultiObjectiveOptimizationConfig,
OptimizationConfig,
)
from ax.core.outcome_constraint import ObjectiveThreshold
from ax.exceptions.core import UserInputError
from ax.plot.base import CI_OPACITY, DECIMALS, AxPlotConfig, AxPlotTypes
from ax.plot.color import COLORS, rgba, DISCRETE_COLOR_SCALE
from ax.plot.helper import extend_range, _format_CI, _format_dict
from ax.plot.pareto_utils import ParetoFrontierResults
from ax.utils.common.typeutils import checked_cast, not_none
from scipy.stats import norm
DEFAULT_CI_LEVEL: float = 0.9
VALID_CONSTRAINT_OP_NAMES = {"GEQ", "LEQ"}
def _make_label(
mean: float, sem: float, name: str, is_relative: bool, Z: Optional[float]
) -> str:
estimate = str(round(mean, DECIMALS))
perc = "%" if is_relative else ""
ci = (
""
if (Z is None or np.isnan(sem))
else _format_CI(estimate=mean, sd=sem, relative=is_relative, zval=Z)
)
return f"{name}: {estimate}{perc} {ci}<br>"
def _filter_outliers(Y: np.ndarray, m: float = 2.0) -> np.ndarray:
std_filter = abs(Y - np.median(Y, axis=0)) < m * np.std(Y, axis=0)
    return Y[np.all(std_filter, axis=1)]
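# Illustrative example (not part of the Ax sources): rows further than m standard deviations
# from the column-wise median are dropped, so
#   _filter_outliers(np.array([[0.1, 0.2], [0.11, 0.19], [5.0, 9.0]]))
# keeps only the first two rows with the default m=2.0.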
def scatter_plot_with_pareto_frontier_plotly(
Y: np.ndarray,
Y_pareto: Optional[np.ndarray],
metric_x: Optional[str],
metric_y: Optional[str],
reference_point: Optional[Tuple[float, float]],
minimize: Optional[Union[bool, Tuple[bool, bool]]] = True,
) -> go.Figure:
"""Plots a scatter of all points in ``Y`` for ``metric_x`` and ``metric_y``
with a reference point and Pareto frontier from ``Y_pareto``.
Points in the scatter are colored in a gradient representing their trial index,
with metric_x on x-axis and metric_y on y-axis. Reference point is represented
as a star and Pareto frontier –– as a line. The frontier connects to the reference
point via projection lines.
NOTE: Both metrics should have the same minimization setting, passed as `minimize`.
Args:
Y: Array of outcomes, of which the first two will be plotted.
Y_pareto: Array of Pareto-optimal points, first two outcomes in which will be
plotted.
metric_x: Name of first outcome in ``Y``.
        metric_y: Name of second outcome in ``Y``.
reference_point: Reference point for ``metric_x`` and ``metric_y``.
minimize: Whether the two metrics in the plot are being minimized or maximized.
"""
title = "Observed metric values"
if isinstance(minimize, bool):
minimize = (minimize, minimize)
Xs = Y[:, 0]
Ys = Y[:, 1]
experimental_points_scatter = [
go.Scatter(
x=Xs,
y=Ys,
mode="markers",
marker={
"color": np.linspace(0, 100, int(len(Xs) * 1.05)),
"colorscale": "magma",
"colorbar": {
"tickvals": [0, 50, 100],
"ticktext": [
1,
"iteration",
len(Xs),
],
},
},
name="Experimental points",
)
]
if Y_pareto is not None:
title += " with Pareto frontier"
if reference_point:
if minimize is None:
minimize = tuple(
reference_point[i] >= max(Y_pareto[:, i]) for i in range(2)
)
reference_point_star = [
go.Scatter(
x=[reference_point[0]],
y=[reference_point[1]],
mode="markers",
marker={
"color": rgba(COLORS.STEELBLUE.value),
"size": 25,
"symbol": "star",
},
)
]
extra_point_x = min(Y_pareto[:, 0]) if minimize[0] else max(Y_pareto[:, 0])
reference_point_line_1 = go.Scatter(
x=[extra_point_x, reference_point[0]],
y=[reference_point[1], reference_point[1]],
mode="lines",
marker={"color": rgba(COLORS.STEELBLUE.value)},
)
extra_point_y = min(Y_pareto[:, 1]) if minimize[1] else max(Y_pareto[:, 1])
reference_point_line_2 = go.Scatter(
x=[reference_point[0], reference_point[0]],
y=[extra_point_y, reference_point[1]],
mode="lines",
marker={"color": rgba(COLORS.STEELBLUE.value)},
)
reference_point_lines = [reference_point_line_1, reference_point_line_2]
Y_pareto_with_extra = np.concatenate(
(
[[extra_point_x, reference_point[1]]],
Y_pareto,
[[reference_point[0], extra_point_y]],
),
axis=0,
)
pareto_step = [
go.Scatter(
x=Y_pareto_with_extra[:, 0],
y=Y_pareto_with_extra[:, 1],
mode="lines",
line_shape="hv",
marker={"color": rgba(COLORS.STEELBLUE.value)},
)
]
range_x = (
extend_range(lower=min(Y_pareto[:, 0]), upper=reference_point[0])
if minimize[0]
else extend_range(lower=reference_point[0], upper=max(Y_pareto[:, 0]))
)
range_y = (
extend_range(lower=min(Y_pareto[:, 1]), upper=reference_point[1])
if minimize[1]
else extend_range(lower=reference_point[1], upper=max(Y_pareto[:, 1]))
)
else: # Reference point was not specified
pareto_step = [
go.Scatter(
x=Y_pareto[:, 0],
y=Y_pareto[:, 1],
mode="lines",
line_shape="hv",
marker={"color": rgba(COLORS.STEELBLUE.value)},
)
]
reference_point_lines = reference_point_star = []
range_x = extend_range(lower=min(Y_pareto[:, 0]), upper=max(Y_pareto[:, 0]))
range_y = extend_range(lower=min(Y_pareto[:, 1]), upper=max(Y_pareto[:, 1]))
else: # `Y_pareto` input was not specified
range_x = extend_range(lower=min(Y[:, 0]), upper=max(Y[:, 0]))
range_y = extend_range(lower=min(Y[:, 1]), upper=max(Y[:, 1]))
pareto_step = reference_point_lines = reference_point_star = []
layout = go.Layout(
title=title,
showlegend=False,
xaxis={"title": metric_x or "", "range": range_x},
yaxis={"title": metric_y or "", "range": range_y},
)
return go.Figure(
layout=layout,
data=pareto_step
+ reference_point_lines
+ experimental_points_scatter
+ reference_point_star,
)
def scatter_plot_with_pareto_frontier(
Y: np.ndarray,
Y_pareto: np.ndarray,
metric_x: str,
metric_y: str,
reference_point: Tuple[float, float],
minimize: bool = True,
) -> AxPlotConfig:
return AxPlotConfig(
data=scatter_plot_with_pareto_frontier_plotly(
Y=Y,
Y_pareto=Y_pareto,
metric_x=metric_x,
metric_y=metric_y,
reference_point=reference_point,
),
plot_type=AxPlotTypes.GENERIC,
)
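# Minimal usage sketch (synthetic data, not taken from the Ax documentation):
# Y = np.random.rand(20, 2)
# Y_pareto = np.array([[0.1, 0.8], [0.3, 0.5], [0.7, 0.2]])
# cfg = scatter_plot_with_pareto_frontier(Y=Y, Y_pareto=Y_pareto, metric_x="m1",
#                                         metric_y="m2", reference_point=(1.0, 1.0))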
def _get_single_pareto_trace(
frontier: ParetoFrontierResults,
CI_level: float,
legend_label: str = "mean",
trace_color: Tuple[int] = COLORS.STEELBLUE.value,
show_parameterization_on_hover: bool = True,
) -> go.Scatter:
primary_means = frontier.means[frontier.primary_metric]
primary_sems = frontier.sems[frontier.primary_metric]
secondary_means = frontier.means[frontier.secondary_metric]
secondary_sems = frontier.sems[frontier.secondary_metric]
absolute_metrics = frontier.absolute_metrics
all_metrics = frontier.means.keys()
if frontier.arm_names is None:
arm_names = [f"Parameterization {i}" for i in range(len(frontier.param_dicts))]
else:
arm_names = [f"Arm {name}" for name in frontier.arm_names]
if CI_level is not None:
Z = 0.5 * norm.ppf(1 - (1 - CI_level) / 2)
else:
Z = None
labels = []
for i, param_dict in enumerate(frontier.param_dicts):
label = f"<b>{arm_names[i]}</b><br>"
for metric in all_metrics:
metric_lab = _make_label(
mean=frontier.means[metric][i],
sem=frontier.sems[metric][i],
name=metric,
is_relative=metric not in absolute_metrics,
Z=Z,
)
label += metric_lab
parameterization = (
_format_dict(param_dict, "Parameterization")
if show_parameterization_on_hover
else ""
)
label += parameterization
labels.append(label)
return go.Scatter(
name=legend_label,
legendgroup=legend_label,
x=secondary_means,
y=primary_means,
error_x={
"type": "data",
"array": Z * np.array(secondary_sems),
"thickness": 2,
"color": rgba(trace_color, CI_OPACITY),
},
        error_y={
            "type": "data",
            "array": Z * np.array(primary_sems),
import numpy as np
def get_data(path):
with open(path) as f:
content = f.readlines()
X = np.array(content)
Y = []
for i in range(60):
A = X[i].split(' ')
A = A[1:]
k = 0
while (k < len(A)):
if ((A[k] == '') or (A[k] == ' ')):
del A[k]
continue
A[k] = float(A[k].strip())
k += 1
Y.append(A)
for i in range(len(Y)):
Y[i] = np.array(Y[i])
Y = np.array(Y)
return Y[:, :-1], Y[:, -1]
def normalize_and_add_ones(X):
X = np.array(X)
X_max = np.array([[np.amax(X[:, column_id]) for column_id in range(X.shape[1])]
for _ in range(X.shape[0])])
    X_min = np.array([[np.amin(X[:, column_id]) for column_id in range(X.shape[1])]
                      for _ in range(X.shape[0])])
    # min-max scale each column, then prepend a bias column of ones
    X_normalized = (X - X_min) / (X_max - X_min)
    ones = np.array([[1] for _ in range(X_normalized.shape[0])])
    return np.column_stack((ones, X_normalized))
import numpy as np
import pandas as pd
from collections import OrderedDict
import os
import json
import shutil
import warnings
import subprocess
def nest(l, depth=1, reps=1):
    """create a nested list of depth 'depth' and with 'reps' repetitions"""
if depth == 0:
return(None)
elif depth == 1:
return(l)
else:
return([nest(l, depth-1, reps)] * reps)
def unnest(l):
"""unnest a nested list l"""
return([x for y in l for x in mlist(y)])
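# Examples of the two helpers above:
#   nest([1, 2], depth=2, reps=2)  -> [[1, 2], [1, 2]]
#   unnest([[1, 2], [3]])          -> [1, 2, 3]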
def constant_array(constant, *args):
"""create an array filled with the 'constant' of shape *args"""
if len(args) == 0:
return(constant)
else:
return(np.array([constant_array(constant, *args[1:])]*args[0]))
def zeros(*args):
"""create a constant array of shape *args"""
return(constant_array(0.0, *args))
def ones(*args):
"""create a constant array of shape *args"""
return(constant_array(1.0, *args))
def mlist(x):
"""make sure x is a list"""
if isinstance(x, list):
return(x)
else:
return([x])
def idict(list_):
"""create an ordered dict of i -> 'list_'[i]"""
return(OrderedDict(zip(range(len(list_)), list_)))
def ridict(list_):
"""create an ordered dict of 'list_'[i] -> i"""
return(OrderedDict({v:k for k, v in idict(list_).items()}))
def ilist(dict_):
return(list(dict_.values()))
def rilist(dict_):
return(list(dict_.keys()))
def sort_dict(dict_):
return([(k, v) for k, v in sorted(list(dict_.items()), key= lambda x: x[0])])
class HyperFrame:
"""
A numpy array with dimension labels and named indices of each dimension for
storage and access to high-dimensional data.
Attributes:
dimension_labels (list[string]): dimension labels
index_labels (OrderedDict[string, list[string]]): dimension label -> index labels
dim_labels (OrderedDict[int, string]): index -> dimension label
rdim_labels (OrderedDict[string, int]): dimension label -> index
val_labels (OrderedDict[string, OrderedDict[int, string]]): dimension label -> index -> index label
rval_labels (OrderedDict[string, OrderedDict[string, int]]): dimension label -> index label -> index
data (np.array): data
shape (set): shape of the data
"""
def __init__(self, dimension_labels, index_labels, data=None):
"""
The constructor of the HyperFrame class.
Parameters:
dimension_labels (list[string]): dimension labels
index_labels (dict[string, list[int | string]]): dimension_label -> index labels
data (np.array): data
"""
index_labels = OrderedDict(index_labels)
self.dimension_labels = dimension_labels
self.dim_labels = idict(dimension_labels)
self.rdim_labels = ridict(dimension_labels)
self.index_labels = index_labels
self.val_labels = OrderedDict({k: idict(v) for k, v in index_labels.items()})
self.rval_labels = OrderedDict({k: ridict(v) for k, v in index_labels.items()})
if data is None:
data = zeros(*[len(self.val_labels[dim_label]) for _, dim_label in self.dim_labels.items()])
self.data = data
self.shape = data.shape
HyperFrame._validate_hyperframe(self)
def len(self, *args):
return([self.shape[self.rdim_labels[a]] for a in args])
def sum(self, *args):
return(self.apply_f1(np.sum, *args))
def mean(self, *args):
return(self.apply_f1(np.mean, *args))
def min(self, *args):
return(self.apply_f1(np.min, *args))
def max(self, *args):
return(self.apply_f1(np.max, *args))
def apply_f1(self, f, *args):
if len(args) == 0:
args = self.dimension_labels
assert np.all([a in self.dimension_labels for a in args])
dims = sorted([self.rdim_labels[a] for a in args])
ndata = self.data
for i, dim in enumerate(dims):
ndata = f(ndata, dim-i)
if isinstance(ndata, type(np.array([0]))):
new_dimension_labels = [d for d in self.dimension_labels if d not in args]
new_index_labels = OrderedDict([(k, v) for k, v in self.index_labels.items() if k not in args])
return(HyperFrame(new_dimension_labels, new_index_labels, ndata))
else:
assert np.issubdtype(type(ndata), np.number)
return(ndata)
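    # Example sketch (dimension names are hypothetical): hf.mean("time") averages out the
    # "time" dimension and returns a smaller HyperFrame, while reducing over every
    # dimension, e.g. hf.mean(), returns a plain scalar.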
def copy(self):
"""
copy this HyperFrame
Returns:
HyperFrame: a new HyperFrame with the same data
"""
return(HyperFrame( self.dimension_labels, OrderedDict(self.index_labels) , self.data.copy()))
def iget(self, *args, **kwargs):
"""
Get a subset of the dataframe by EITHER args or kwargs
Parameters:
*args (list[string]): values on each dimension by which the data should be subset, dimensions that should
not be subset should have a value not in the dimension index
**kwargs (dict[string, int | string | list[int] | list[string]]): dimension labels -> value or values
to subset by
return_type (string) (in kwargs): in ["hyperframe", "pandas", "numpy"]
Returns:
HyperFrame | pd.DataFrame | pd.Series | np.array: subset of original data
"""
return_type = kwargs.pop("return_type", "hyperframe")
kwargs = self._build_kwargs(args, kwargs)
ndim_labels = [(i, v)
for i, v in self.dim_labels.items()
if v not in kwargs.keys() or len(kwargs[v]) > 1]
ndim_labels = [x[1] for x in sorted(ndim_labels, key=lambda y: y[0])]
nval_labels = {k: kwargs.get(k, v) for k, v in self.val_labels.items() if len(kwargs.get(k, v)) > 1}
nval_labels = {k: (mlist(v) if not isinstance(v, dict) else list(v.values())) for k, v in nval_labels.items()}
indices = self._construct_indices(kwargs, self.data.shape)
ndata = self.data[np.ix_(*indices)]
ndata = ndata.reshape(*[x for x in ndata.shape if x > 1])
subset = HyperFrame(ndim_labels, nval_labels, ndata)
return(HyperFrame._cast_return_value(subset, return_type))
@staticmethod
def _cast_return_value(hyperframe, return_type):
"""'cast' a hyperframe to a pandas or numpy object, or return unaltered"""
if return_type == "pandas":
return(HyperFrame._get_pandas_object(hyperframe))
elif return_type == "numpy":
return(hyperframe.data)
elif return_type == "hyperframe":
return(hyperframe)
else:
warnings.warn("return_type must be in ['hyperframe', 'pandas', 'numpy']")
@staticmethod
def _get_pandas_object(hyperframe):
"""
        Turn a HyperFrame of dimensionality <= 2 into a pandas object
        Parameters:
hyperframe (HyperFrame)
Returns:
pd.DataFrame | pd.Series
"""
indices = [[v2 for k2, v2 in sort_dict(hyperframe.val_labels[v])]
for k, v in sort_dict(hyperframe.dim_labels)]
assert len(indices) > 0, "pandas objects must have at least one dimension"
assert len(indices) <= 2, "pandas objects cannot have {} dimensions".format(len(indices))
if len(indices) == 1:
return(pd.Series(hyperframe.data, index=indices[0]))
else:
return(pd.DataFrame(hyperframe.data, index=indices[0], columns = indices[1]))
def iget0(self, *args, return_type=None):
"""
Return data for the dimensions in *args by subsetting the other dimensions to the first index label
Parameters:
*args (list[string]): the dimensions to be preserved in full
return_type (string) (in kwargs): in ["hyperframe", "pandas", "numpy"]
Returns:
HyperFrame | pd.DataFrame | pd.Series | np.array: subset of original data
"""
assert len(args) > 0 and len(args) <= 2
assert np.all([a in self.dimension_labels for a in args])
kwargs = {v: self.val_labels[v][0] for i, v in self.dim_labels.items() if v not in args}
print(kwargs)
subset = self.iget(**kwargs)
return(HyperFrame._cast_return_value(subset, return_type))
def iset(self, new_data, *args, **kwargs):
"""
Replace a subset of the data with 'new_data'
Parameters:
new_data (np.array): new data
*args (list[string]): values on each dimension by which the data should be subset, dimensions that should
not be subset should have a value not in the dimension index
**kwargs (dict[string, int | string | list[int] | list[string]]): dimension labels -> value or values
to subset by
Returns:
HyperFrame: HyperFrame with changed data
"""
kwargs = self._build_kwargs(args, kwargs)
        assert np.issubdtype(type(new_data), np.number) or \
            isinstance(new_data, type(np.array([0])))
#====================================Like.py===================================#
# Created by <NAME> 2021
# Contains functions for interfacing with the fortran code in src/like
# the fortran likelihood code needs to be compiled first by running the make
# file in src/like
#==============================================================================#
from __future__ import print_function
from numpy import pi, sqrt, exp, zeros, size, shape, array, append, flipud, gradient
from numpy import trapz, interp, loadtxt, log10, log, savetxt, vstack, transpose
from numpy import ravel, tile, mean, inf, nan, amin, amax, logspace
from scipy.ndimage.filters import gaussian_filter1d
from scipy.integrate import cumtrapz
from numpy.linalg import norm
from scipy.special import gammaln
from Params import *
import LabFuncs
import NeutrinoFuncs
import WIMPFuncs
import shlex
import subprocess
import pprint
def Floor_2D(data,filt=True,filt_width=3,Ex_crit=1e10):
sig = data[1:,0]
m = data[0,1:]
n = size(m)
ns = size(sig)
Ex = flipud(transpose(data[1:,1:].T))
Ex[Ex>Ex_crit] = nan
Exmin = amin(Ex[Ex>0])
Ex[Ex==0] = Exmin
DY = zeros(shape=shape(Ex))
for j in range(0,n):
y = log10(Ex[:,j])
if filt:
y = gaussian_filter1d(gaussian_filter1d(y,sigma=3),filt_width)
dy = gradient(y,log10(sig[2])-log10(sig[1]))
dy = gaussian_filter1d(dy,filt_width)
else:
dy = gradient(y,log10(sig[2])-log10(sig[1]))
DY[:,j] = dy
NUFLOOR = zeros(shape=n)
#for j in range(0,n):
# DY[:,j] = gaussian_filter1d(DY[:,j],filt_width)
for j in range(0,n):
for i in range(0,ns):
if DY[ns-1-i,j]<=-2.0:
i0 = ns-1-i
i1 = i0+10
NUFLOOR[j] = 10.0**interp(-2,DY[i0:i1+1,j],log10(sig[i0:i1+1]))
DY[ns-1-i:-1,j] = nan
break
DY = -DY
DY[DY<2] = 2
return m,sig,NUFLOOR,DY
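# Usage sketch (the file name is a placeholder): Floor_2D expects a grid with masses
# along the first row and cross sections down the first column.
# data = loadtxt('limits/DiscoveryLimit_grid.txt')
# m, sig, NUFLOOR, DY = Floor_2D(data, filt=True, filt_width=3)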
def NuFloor_1event(mvals,Nuc,nths=100):
# Load neutrino fluxes
Names,solar,E_nu_all,Flux_all,Flux_norm,Flux_err = NeutrinoFuncs.GetNuFluxes(0.0)
n_nu = shape(Flux_all)[0]
E_ths = logspace(log10(0.0001),log10(100.0),nths)
t = 0
    R = zeros(shape=nths)
import pythreejs as three
import numpy as np
from time import time, sleep
from .Colors import colors
from ..utils import Observer, ColorMap
import threading
import copy
class DrawableMesh(Observer):
def __init__(self, geometry, mesh_color = None, reactive = False):
super(DrawableMesh, self).__init__()
self._external_color = colors.teal
self._internal_color = colors.orange
self._color_map = None
self._metric_string = None
self._c_map_string = None
self._label_colors = None
self.geometry = geometry
if reactive:
self.geometry.attach(self)
self.geometry_color = self.__initialize_geometry_color(mesh_color)
self.mesh = self.__initialize_mesh()
self.wireframe = self.__initialize_wireframe()
self.threejs_items = [self.mesh, self.wireframe]
self.updating = False
self.queue = False
def __initialize_geometry_color(self, mesh_color, geometry = None):
if geometry is None:
geometry = self.geometry
if mesh_color is None:
color = np.repeat(self._external_color.reshape(1, 3),
geometry.num_triangles*3, axis=0
)
if hasattr(self.geometry, "internals"):
internal_color = geometry.internal_triangles_idx()
color[internal_color] = self._internal_color
return color
def update_wireframe_color(self, new_color):
self.wireframe.material.color = new_color
def update_wireframe_opacity(self, new_opacity):
self.wireframe.material.opacity = new_opacity
def update_internal_color(self, new_color, geometry = None):
if geometry is None:
geometry = self.geometry
self._internal_color = np.array(new_color)
if hasattr(geometry, "internals"):
internal_color = geometry.internal_triangles_idx()
self.geometry_color[internal_color] = new_color
colors = geometry._as_threejs_colors()
new_colors = self.geometry_color[colors]
tris, vtx_normals = geometry._as_threejs_triangle_soup()
interleaved = np.concatenate((tris, new_colors, vtx_normals), axis=1)
self.mesh.geometry.attributes['color'].data.array = interleaved
def update_external_color(self, new_color, geometry = None):
if geometry is None:
geometry = self.geometry
self._external_color = np.array(new_color)
if hasattr(geometry, "internals"):
internal_color = geometry.internal_triangles_idx()
self.geometry_color[np.logical_not(internal_color)] = new_color
else:
self.geometry_color[:] = new_color
colors = geometry._as_threejs_colors()
new_colors = self.geometry_color[colors]
tris, vtx_normals = geometry._as_threejs_triangle_soup()
interleaved = np.concatenate((tris, new_colors, vtx_normals), axis=1)
self.mesh.geometry.attributes['color'].data.array = interleaved
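    # Usage sketch (assumes a DrawableMesh already built around a geometry object):
    # drawable.update_external_color([1.0, 0.0, 0.0])   # recolor the surface triangles
    # drawable.update_internal_color([0.2, 0.2, 0.2])   # recolor internal triangles, if any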
def update_color_map(self, new_colors, geometry = None):
if geometry is None:
geometry = self.geometry
self.geometry_color[:] = geometry._as_threejs_colors(colors= new_colors)
colors = geometry._as_threejs_colors()
new_colors = self.geometry_color[colors]
tris, vtx_normals = geometry._as_threejs_triangle_soup()
interleaved = np.concatenate((tris, new_colors, vtx_normals), axis=1)
self.mesh.geometry.attributes['color'].data.array = interleaved
def compute_color_map(self, metric_string, c_map_string, geometry=None):
if geometry is None:
geometry = self.geometry
self._metric_string = metric_string
self._c_map_string = c_map_string
(min_range, max_range), metric = self.geometry.simplex_metrics[metric_string]
c_map = ColorMap.color_maps[c_map_string]
if min_range is None or max_range is None:
min_range = np.min(metric)
max_range = np.max(metric)
        if (np.abs(max_range - min_range)
import sys
import numpy as np
import pandas as pd
# Constant
lRate = 0.05
iterTime = 3000
def sigmoid(z):
return 1 / (1 + np.exp(-z))
def readFile(filename):
    return pd.read_csv(filename).to_numpy().astype('float')
I = [0, 1, 3, 4, 5]
def regulate(X):
    return np.concatenate((
        X,
        X[:, I] ** 2, X[:, I] ** 3, X[:, I] ** 4, X[:, I] ** 5,
        np.log(X[:, I] + 1e-10),
        (X[:, 0] * X[:, 3]).reshape(X.shape[0], 1),
        (X[:, 0] * X[:, 5]).reshape(X.shape[0], 1),
        (X[:, 0] * X[:, 5]).reshape(X.shape[0], 1) ** 2,
        (X[:, 3] * X[:, 5]).reshape(X.shape[0], 1),
        X[:, 6:] * X[:, 5].reshape(X.shape[0], 1),
        (X[:, 3] - X[:, 4]).reshape(X.shape[0], 1),
        (X[:, 3] - X[:, 4]).reshape(X.shape[0], 1) ** 3,
    ), axis=1)
X_train, Y_train, X_test = readFile(sys.argv[1]), readFile(sys.argv[2]), readFile(sys.argv[3])
X_train, X_test = regulate(X_train), regulate(X_test)
NUM = 6000
valid = (X_train[-NUM:], Y_train[-NUM:])
X_train, Y_train = X_train[:-NUM], Y_train[:-NUM]
meanX, stdX = np.mean(X_train, axis = 0), np.std(X_train, axis = 0)
def cost(X, y, w):
pred = sigmoid(np.dot(X, w))
return -np.mean(y * np.log(pred + 1e-20) + (1 - y) * np.log((1 - pred + 1e-20)))
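# Quick sanity check of the cost above: with w = 0 every prediction is sigmoid(0) = 0.5,
# so the cross-entropy reduces to -log(0.5) ~ 0.693 regardless of the labels.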
def scale(X, mean, std):
return (X - mean) / (std + 1e-20)
def evaluate(X, y, w):
    p = sigmoid(np.dot(X, w))
from collections import Counter
import pytest
import numpy as np
from numpy.testing import (assert_allclose, assert_almost_equal, assert_,
assert_equal, assert_array_almost_equal,
assert_array_equal)
from scipy.stats import shapiro
from scipy.stats._sobol import _test_find_index
from scipy.stats import qmc
from scipy.stats._qmc import (van_der_corput, n_primes, primes_from_2_to,
update_discrepancy,
QMCEngine, check_random_state)
class TestUtils:
def test_scale(self):
# 1d scalar
space = [[0], [1], [0.5]]
out = [[-2], [6], [2]]
scaled_space = qmc.scale(space, l_bounds=-2, u_bounds=6)
assert_allclose(scaled_space, out)
# 2d space
space = [[0, 0], [1, 1], [0.5, 0.5]]
bounds = np.array([[-2, 0], [6, 5]])
out = [[-2, 0], [6, 5], [2, 2.5]]
scaled_space = qmc.scale(space, l_bounds=bounds[0], u_bounds=bounds[1])
assert_allclose(scaled_space, out)
scaled_back_space = qmc.scale(scaled_space, l_bounds=bounds[0],
u_bounds=bounds[1], reverse=True)
assert_allclose(scaled_back_space, space)
# broadcast
space = [[0, 0, 0], [1, 1, 1], [0.5, 0.5, 0.5]]
l_bounds, u_bounds = 0, [6, 5, 3]
out = [[0, 0, 0], [6, 5, 3], [3, 2.5, 1.5]]
scaled_space = qmc.scale(space, l_bounds=l_bounds, u_bounds=u_bounds)
assert_allclose(scaled_space, out)
def test_scale_random(self):
np.random.seed(0)
sample = np.random.rand(30, 10)
a = -np.random.rand(10) * 10
b = np.random.rand(10) * 10
scaled = qmc.scale(sample, a, b, reverse=False)
unscaled = qmc.scale(scaled, a, b, reverse=True)
assert_allclose(unscaled, sample)
def test_scale_errors(self):
with pytest.raises(ValueError, match=r"Sample is not a 2D array"):
space = [0, 1, 0.5]
qmc.scale(space, l_bounds=-2, u_bounds=6)
with pytest.raises(ValueError, match=r"Bounds are not consistent"
r" a < b"):
space = [[0, 0], [1, 1], [0.5, 0.5]]
bounds = np.array([[-2, 6], [6, 5]])
qmc.scale(space, l_bounds=bounds[0], u_bounds=bounds[1])
with pytest.raises(ValueError, match=r"shape mismatch: objects cannot "
r"be broadcast to a "
r"single shape"):
space = [[0, 0], [1, 1], [0.5, 0.5]]
l_bounds, u_bounds = [-2, 0, 2], [6, 5]
qmc.scale(space, l_bounds=l_bounds, u_bounds=u_bounds)
with pytest.raises(ValueError, match=r"Sample dimension is different "
r"than bounds dimension"):
space = [[0, 0], [1, 1], [0.5, 0.5]]
bounds = np.array([[-2, 0, 2], [6, 5, 5]])
qmc.scale(space, l_bounds=bounds[0], u_bounds=bounds[1])
with pytest.raises(ValueError, match=r"Sample is not in unit "
r"hypercube"):
space = [[0, 0], [1, 1.5], [0.5, 0.5]]
bounds = np.array([[-2, 0], [6, 5]])
qmc.scale(space, l_bounds=bounds[0], u_bounds=bounds[1])
with pytest.raises(ValueError, match=r"Sample is out of bounds"):
out = [[-2, 0], [6, 5], [8, 2.5]]
bounds = np.array([[-2, 0], [6, 5]])
qmc.scale(out, l_bounds=bounds[0], u_bounds=bounds[1],
reverse=True)
def test_discrepancy(self):
space_1 = np.array([[1, 3], [2, 6], [3, 2], [4, 5], [5, 1], [6, 4]])
space_1 = (2.0 * space_1 - 1.0) / (2.0 * 6.0)
space_2 = np.array([[1, 5], [2, 4], [3, 3], [4, 2], [5, 1], [6, 6]])
space_2 = (2.0 * space_2 - 1.0) / (2.0 * 6.0)
# From Fang et al. Design and modeling for computer experiments, 2006
assert_allclose(qmc.discrepancy(space_1), 0.0081, atol=1e-4)
assert_allclose(qmc.discrepancy(space_2), 0.0105, atol=1e-4)
# From <NAME>. et al. Mixture discrepancy for quasi-random point
# sets. Journal of Complexity, 29 (3-4), pp. 283-301, 2013.
# Example 4 on Page 298
sample = np.array([[2, 1, 1, 2, 2, 2],
[1, 2, 2, 2, 2, 2],
[2, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 2, 2],
[1, 2, 2, 2, 1, 1],
[2, 2, 2, 2, 1, 1],
[2, 2, 2, 1, 2, 2]])
sample = (2.0 * sample - 1.0) / (2.0 * 2.0)
assert_allclose(qmc.discrepancy(sample, method='MD'), 2.5000,
atol=1e-4)
assert_allclose(qmc.discrepancy(sample, method='WD'), 1.3680,
atol=1e-4)
assert_allclose(qmc.discrepancy(sample, method='CD'), 0.3172,
atol=1e-4)
# From <NAME> al. Minimizing the L2 and Linf star discrepancies
# of a single point in the unit hypercube. JCAM, 2005
# Table 1 on Page 283
for dim in [2, 4, 8, 16, 32, 64]:
ref = np.sqrt(3**(-dim))
assert_allclose(qmc.discrepancy(np.array([[1]*dim]),
method='L2-star'), ref)
def test_discrepancy_errors(self):
sample = np.array([[1, 3], [2, 6], [3, 2], [4, 5], [5, 1], [6, 4]])
with pytest.raises(ValueError, match=r"Sample is not in unit "
r"hypercube"):
qmc.discrepancy(sample)
with pytest.raises(ValueError, match=r"Sample is not a 2D array"):
qmc.discrepancy([1, 3])
sample = [[0, 0], [1, 1], [0.5, 0.5]]
with pytest.raises(ValueError, match=r"toto is not a valid method."):
qmc.discrepancy(sample, method='toto')
def test_update_discrepancy(self):
space_1 = np.array([[1, 3], [2, 6], [3, 2], [4, 5], [5, 1], [6, 4]])
space_1 = (2.0 * space_1 - 1.0) / (2.0 * 6.0)
disc_init = qmc.discrepancy(space_1[:-1], iterative=True)
disc_iter = update_discrepancy(space_1[-1], space_1[:-1],
disc_init)
assert_allclose(disc_iter, 0.0081, atol=1e-4)
# errors
with pytest.raises(ValueError, match=r"Sample is not in unit "
r"hypercube"):
update_discrepancy(space_1[-1], space_1[:-1] + 1, disc_init)
with pytest.raises(ValueError, match=r"Sample is not a 2D array"):
update_discrepancy(space_1[-1], space_1[0], disc_init)
x_new = [1, 3]
with pytest.raises(ValueError, match=r"x_new is not in unit "
r"hypercube"):
update_discrepancy(x_new, space_1[:-1], disc_init)
x_new = [[0.5, 0.5]]
with pytest.raises(ValueError, match=r"x_new is not a 1D array"):
update_discrepancy(x_new, space_1[:-1], disc_init)
def test_discrepancy_alternative_implementation(self):
"""Alternative definitions from <NAME>."""
def disc_c2(x):
n, s = x.shape
xij = x
disc1 = np.sum(np.prod((1
+ 1/2*np.abs(xij-0.5)
- 1/2*np.abs(xij-0.5)**2), axis=1))
xij = x[None, :, :]
xkj = x[:, None, :]
disc2 = np.sum(np.sum(np.prod(1
+ 1/2*np.abs(xij - 0.5)
+ 1/2*np.abs(xkj - 0.5)
- 1/2*np.abs(xij - xkj), axis = 2),
axis=0))
return (13/12)**s - 2/n * disc1 + 1/n**2*disc2
def disc_wd(x):
n, s = x.shape
xij = x[None, :, :]
xkj = x[:, None, :]
disc = np.sum(np.sum(np.prod(3/2
- np.abs(xij - xkj)
+ np.abs(xij - xkj)**2, axis = 2),
axis=0))
return -(4/3)**s + 1/n**2 * disc
def disc_md(x):
n, s = x.shape
xij = x
disc1 = np.sum(np.prod((5/3
- 1/4*np.abs(xij-0.5)
- 1/4*np.abs(xij-0.5)**2), axis=1))
xij = x[None, :, :]
xkj = x[:, None, :]
disc2 = np.sum(np.sum(np.prod(15/8
- 1/4*np.abs(xij - 0.5)
- 1/4*np.abs(xkj - 0.5)
- 3/4*np.abs(xij - xkj)
+ 1/2*np.abs(xij - xkj)**2,
axis = 2), axis=0))
return (19/12)**s - 2/n * disc1 + 1/n**2*disc2
def disc_star_l2(x):
n, s = x.shape
return np.sqrt(
3 ** (-s) - 2 ** (1 - s) / n
* np.sum(np.prod(1 - x ** 2, axis=1))
+ np.sum([
np.prod(1 - np.maximum(x[k, :], x[j, :]))
for k in range(n) for j in range(n)
]) / n ** 2
)
np.random.seed(0)
sample = np.random.rand(30, 10)
disc_curr = qmc.discrepancy(sample, method='CD')
disc_alt = disc_c2(sample)
assert_allclose(disc_curr, disc_alt)
disc_curr = qmc.discrepancy(sample, method='WD')
disc_alt = disc_wd(sample)
assert_allclose(disc_curr, disc_alt)
disc_curr = qmc.discrepancy(sample, method='MD')
disc_alt = disc_md(sample)
assert_allclose(disc_curr, disc_alt)
disc_curr = qmc.discrepancy(sample, method='L2-star')
disc_alt = disc_star_l2(sample)
assert_allclose(disc_curr, disc_alt)
def test_n_primes(self):
primes = n_primes(10)
assert primes[-1] == 29
primes = n_primes(168)
assert primes[-1] == 997
primes = n_primes(350)
assert primes[-1] == 2357
def test_primes(self):
primes = primes_from_2_to(50)
out = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]
assert_allclose(primes, out)
class TestVDC:
def test_van_der_corput(self):
seed = np.random.RandomState(12345)
sample = van_der_corput(10, seed=seed)
out = [0., 0.5, 0.25, 0.75, 0.125, 0.625, 0.375, 0.875, 0.0625, 0.5625]
assert_almost_equal(sample, out)
sample = van_der_corput(7, start_index=3, seed=seed)
assert_almost_equal(sample, out[3:])
def test_van_der_corput_scramble(self):
seed = np.random.RandomState(123456)
out = van_der_corput(10, scramble=True, seed=seed)
seed = np.random.RandomState(123456)
sample = van_der_corput(7, start_index=3, scramble=True,
seed=seed)
assert_almost_equal(sample, out[3:])
class RandomEngine(qmc.QMCEngine):
def __init__(self, d, seed):
super().__init__(d=d, seed=seed)
def random(self, n=1):
self.num_generated += n
try:
sample = self.rng.random((n, self.d))
except AttributeError:
sample = self.rng.random_sample((n, self.d))
return sample
def test_subclassing_QMCEngine():
seed = np.random.RandomState(123456)
engine = RandomEngine(2, seed=seed)
sample_1 = engine.random(n=5)
sample_2 = engine.random(n=7)
assert engine.num_generated == 12
# reset and re-sample
engine.reset()
assert engine.num_generated == 0
sample_1_test = engine.random(n=5)
assert_equal(sample_1, sample_1_test)
# repeat reset and fast forward
engine.reset()
engine.fast_forward(n=5)
sample_2_test = engine.random(n=7)
assert_equal(sample_2, sample_2_test)
assert engine.num_generated == 12
class QMCEngineTests:
"""Generic tests for QMC engines."""
qmce = NotImplemented
can_scramble = NotImplemented
unscramble_nd = NotImplemented
scramble_nd = NotImplemented
scramble = [True, False]
ids = ["Scrambled", "Unscrambled"]
def engine(self, scramble: bool, **kwargs) -> QMCEngine:
seed = np.random.RandomState(123456)
if self.can_scramble:
return self.qmce(scramble=scramble, seed=seed, **kwargs)
else:
if scramble:
pytest.skip()
else:
return self.qmce(seed=seed, **kwargs)
def reference(self, scramble: bool) -> np.ndarray:
return self.scramble_nd if scramble else self.unscramble_nd
@pytest.mark.parametrize("scramble", scramble, ids=ids)
def test_0dim(self, scramble):
engine = self.engine(d=0, scramble=scramble)
sample = engine.random(4)
assert_array_equal(np.empty((4, 0)), sample)
@pytest.mark.parametrize("scramble", scramble, ids=ids)
def test_0sample(self, scramble):
engine = self.engine(d=2, scramble=scramble)
sample = engine.random(0)
assert_array_equal(np.empty((0, 2)), sample)
@pytest.mark.parametrize("scramble", scramble, ids=ids)
def test_1sample(self, scramble):
engine = self.engine(d=2, scramble=scramble)
sample = engine.random(1)
assert (1, 2) == sample.shape
@pytest.mark.parametrize("scramble", scramble, ids=ids)
def test_bounds(self, scramble):
engine = self.engine(d=100, scramble=scramble)
sample = engine.random(512)
assert_(np.all(sample >= 0))
assert_(np.all(sample <= 1))
@pytest.mark.parametrize("scramble", scramble, ids=ids)
def test_sample(self, scramble):
ref_sample = self.reference(scramble=scramble)
engine = self.engine(d=2, scramble=scramble)
sample = engine.random(n=len(ref_sample))
assert_almost_equal(sample, ref_sample, decimal=1)
assert engine.num_generated == len(ref_sample)
@pytest.mark.parametrize("scramble", scramble, ids=ids)
def test_continuing(self, scramble):
ref_sample = self.reference(scramble=scramble)
engine = self.engine(d=2, scramble=scramble)
n_half = len(ref_sample) // 2
_ = engine.random(n=n_half)
sample = engine.random(n=n_half)
assert_almost_equal(sample, ref_sample[n_half:], decimal=1)
@pytest.mark.parametrize("scramble", scramble, ids=ids)
def test_reset(self, scramble):
engine = self.engine(d=2, scramble=scramble)
ref_sample = engine.random(n=8)
engine.reset()
assert engine.num_generated == 0
sample = engine.random(n=8)
assert_allclose(sample, ref_sample)
@pytest.mark.parametrize("scramble", scramble, ids=ids)
def test_fast_forward(self, scramble):
ref_sample = self.reference(scramble=scramble)
engine = self.engine(d=2, scramble=scramble)
engine.fast_forward(4)
sample = engine.random(n=4)
assert_almost_equal(sample, ref_sample[4:], decimal=1)
# alternate fast forwarding with sampling
engine.reset()
even_draws = []
for i in range(8):
if i % 2 == 0:
even_draws.append(engine.random())
else:
engine.fast_forward(1)
assert_almost_equal(
ref_sample[[i for i in range(8) if i % 2 == 0]],
np.concatenate(even_draws),
decimal=5
)
@pytest.mark.parametrize("scramble", [True])
def test_distribution(self, scramble):
d = 50
engine = self.engine(d=d, scramble=scramble)
sample = engine.random(1024)
assert_array_almost_equal(
np.mean(sample, axis=0), np.repeat(0.5, d), decimal=2
)
assert_array_almost_equal(
np.percentile(sample, 25, axis=0), np.repeat(0.25, d), decimal=2
)
assert_array_almost_equal(
np.percentile(sample, 75, axis=0), np.repeat(0.75, d), decimal=2
)
class TestHalton(QMCEngineTests):
qmce = qmc.Halton
can_scramble = True
# theoretical values known from <NAME>
unscramble_nd = np.array([[0, 0], [1 / 2, 1 / 3],
[1 / 4, 2 / 3], [3 / 4, 1 / 9],
[1 / 8, 4 / 9], [5 / 8, 7 / 9],
[3 / 8, 2 / 9], [7 / 8, 5 / 9]])
# theoretical values unknown: convergence properties checked
scramble_nd = np.array([[0.34229571, 0.89178423],
[0.84229571, 0.07696942],
[0.21729571, 0.41030275],
[0.71729571, 0.74363609],
[0.46729571, 0.18808053],
[0.96729571, 0.52141386],
[0.06104571, 0.8547472],
[0.56104571, 0.29919164]])
class TestLHS(QMCEngineTests):
qmce = qmc.LatinHypercube
can_scramble = False
def test_continuing(self, *args):
pytest.skip("Not applicable: not a sequence.")
def test_fast_forward(self, *args):
pytest.skip("Not applicable: not a sequence.")
def test_sample(self, *args):
pytest.skip("Not applicable: the value of reference sample is implementation dependent.")
def test_sample_stratified(self):
d, n = 4, 20
expected1d = (np.arange(n) + 0.5) / n
expected = np.broadcast_to(expected1d, (d, n)).T
engine = self.engine(d=d, scramble=False, centered=True)
sample = engine.random(n=n)
sorted_sample = np.sort(sample, axis=0)
assert_equal(sorted_sample, expected)
assert np.any(sample != expected)
engine = self.engine(d=d, scramble=False, centered=False)
sample = engine.random(n=n)
sorted_sample = np.sort(sample, axis=0)
assert_allclose(sorted_sample, expected, atol=0.5 / n)
assert np.any(sample - expected > 0.5 / n)
class TestSobol(QMCEngineTests):
qmce = qmc.Sobol
can_scramble = True
    # theoretical values from Joe & Kuo (2010)
unscramble_nd = np.array([[0., 0.],
[0.5, 0.5],
[0.75, 0.25],
[0.25, 0.75],
[0.375, 0.375],
[0.875, 0.875],
[0.625, 0.125],
[0.125, 0.625]])
# theoretical values unknown: convergence properties checked
scramble_nd = np.array([[0.50860737, 0.29320504],
[0.07116939, 0.89594537],
[0.49354145, 0.11524881],
[0.93097717, 0.70244044],
[0.87266153, 0.23887917],
[0.31021884, 0.57600391],
[0.13687253, 0.42054182],
[0.69931293, 0.77336788]])
def test_warning(self):
with pytest.warns(UserWarning, match=r"The balance properties of "
r"Sobol' points"):
seed = np.random.RandomState(12345)
engine = qmc.Sobol(1, seed=seed)
engine.random(10)
def test_random_base2(self):
seed = np.random.RandomState(12345)
engine = qmc.Sobol(2, scramble=False, seed=seed)
sample = engine.random_base2(2)
assert_array_equal(self.unscramble_nd[:4],
sample)
# resampling still having N=2**n
sample = engine.random_base2(2)
assert_array_equal(self.unscramble_nd[4:8],
sample)
# resampling again but leading to N!=2**n
with pytest.raises(ValueError, match=r"The balance properties of "
r"Sobol' points"):
engine.random_base2(2)
def test_raise(self):
with pytest.raises(ValueError, match=r"Maximum supported "
r"dimensionality"):
qmc.Sobol(qmc.Sobol.MAXDIM + 1)
def test_high_dim(self):
seed = np.random.RandomState(12345)
engine = qmc.Sobol(1111, scramble=False, seed=seed)
count1 = Counter(engine.random().flatten().tolist())
count2 = Counter(engine.random().flatten().tolist())
assert_equal(count1, Counter({0.0: 1111}))
assert_equal(count2, Counter({0.5: 1111}))
class TestMultinomialQMC:
def test_MultinomialNegativePs(self):
p = np.array([0.12, 0.26, -0.05, 0.35, 0.22])
with pytest.raises(ValueError, match=r"Elements of pvals must "
r"be non-negative."):
qmc.MultinomialQMC(p)
def test_MultinomialSumOfPTooLarge(self):
p = np.array([0.12, 0.26, 0.1, 0.35, 0.22])
with pytest.raises(ValueError, match=r"Elements of pvals must sum "
r"to 1."):
qmc.MultinomialQMC(p)
@pytest.mark.filterwarnings('ignore::UserWarning')
def test_MultinomialBasicDraw(self):
seed = np.random.RandomState(12345)
p = np.array([0.12, 0.26, 0.05, 0.35, 0.22])
expected = np.array([12, 25, 6, 35, 22])
engine = qmc.MultinomialQMC(p, seed=seed)
assert_array_equal(engine.random(100), expected)
def test_MultinomialDistribution(self):
seed = np.random.RandomState(12345)
p = np.array([0.12, 0.26, 0.05, 0.35, 0.22])
engine = qmc.MultinomialQMC(p, seed=seed)
draws = engine.random(8192)
assert_array_almost_equal(draws / np.sum(draws), p, decimal=4)
def test_FindIndex(self):
p_cumulative = np.array([0.1, 0.4, 0.45, 0.6, 0.75, 0.9, 0.99, 1.0])
size = len(p_cumulative)
assert_equal(_test_find_index(p_cumulative, size, 0.0), 0)
assert_equal(_test_find_index(p_cumulative, size, 0.4), 2)
assert_equal(_test_find_index(p_cumulative, size, 0.44999), 2)
assert_equal(_test_find_index(p_cumulative, size, 0.45001), 3)
assert_equal(_test_find_index(p_cumulative, size, 1.0), size - 1)
@pytest.mark.filterwarnings('ignore::UserWarning')
def test_other_engine(self):
# same as test_MultinomialBasicDraw with different engine
seed = np.random.RandomState(12345)
p = np.array([0.12, 0.26, 0.05, 0.35, 0.22])
expected = np.array([12, 25, 6, 35, 22])
base_engine = qmc.Sobol(1, scramble=True, seed=seed)
engine = qmc.MultinomialQMC(p, engine=base_engine, seed=seed)
assert_array_equal(engine.random(100), expected)
def test_reset(self):
p = np.array([0.12, 0.26, 0.05, 0.35, 0.22])
seed = np.random.RandomState(12345)
engine = qmc.MultinomialQMC(p, seed=seed)
samples = engine.random(2)
engine.reset()
samples_reset = engine.random(2)
assert_array_equal(samples, samples_reset)
class TestNormalQMC:
def test_NormalQMC(self):
# d = 1
seed = np.random.RandomState(123456)
engine = qmc.MultivariateNormalQMC(mean=np.zeros(1), seed=seed)
samples = engine.random()
assert_equal(samples.shape, (1, 1))
samples = engine.random(n=5)
assert_equal(samples.shape, (5, 1))
# d = 2
engine = qmc.MultivariateNormalQMC(mean=np.zeros(2), seed=seed)
samples = engine.random()
assert_equal(samples.shape, (1, 2))
samples = engine.random(n=5)
assert_equal(samples.shape, (5, 2))
def test_NormalQMCInvTransform(self):
# d = 1
seed = np.random.RandomState(123456)
engine = qmc.MultivariateNormalQMC(
mean=np.zeros(1), inv_transform=True, seed=seed)
samples = engine.random()
assert_equal(samples.shape, (1, 1))
samples = engine.random(n=5)
assert_equal(samples.shape, (5, 1))
# d = 2
engine = qmc.MultivariateNormalQMC(
mean=np.zeros(2), inv_transform=True, seed=seed)
samples = engine.random()
assert_equal(samples.shape, (1, 2))
samples = engine.random(n=5)
assert_equal(samples.shape, (5, 2))
def test_other_engine(self):
seed = np.random.RandomState(123456)
base_engine = qmc.Sobol(d=2, scramble=False, seed=seed)
engine = qmc.MultivariateNormalQMC(mean=np.zeros(2),
engine=base_engine,
inv_transform=True, seed=seed)
samples = engine.random()
assert_equal(samples.shape, (1, 2))
def test_NormalQMCSeeded(self):
# test even dimension
seed = np.random.RandomState(12345)
engine = qmc.MultivariateNormalQMC(
mean=np.zeros(2), inv_transform=False, seed=seed)
samples = engine.random(n=2)
samples_expected = np.array(
[[-0.943472, 0.405116], [-0.63099602, -1.32950772]]
)
assert_array_almost_equal(samples, samples_expected)
# test odd dimension
seed = np.random.RandomState(12345)
engine = qmc.MultivariateNormalQMC(
mean=np.zeros(3), inv_transform=False, seed=seed)
samples = engine.random(n=2)
samples_expected = np.array(
[
[-0.943472, 0.405116, 0.268828],
[1.83169884, -1.40473647, 0.24334828],
]
)
assert_array_almost_equal(samples, samples_expected)
def test_NormalQMCSeededInvTransform(self):
# test even dimension
seed = np.random.RandomState(12345)
engine = qmc.MultivariateNormalQMC(
mean=np.zeros(2), seed=seed, inv_transform=True)
samples = engine.random(n=2)
samples_expected = np.array(
[[0.228309, -0.162516], [-0.41622922, 0.46622792]]
)
assert_array_almost_equal(samples, samples_expected)
# test odd dimension
seed = np.random.RandomState(12345)
engine = qmc.MultivariateNormalQMC(
mean=np.zeros(3), seed=seed, inv_transform=True)
samples = engine.random(n=2)
samples_expected = np.array(
[
[0.228309, -0.162516, 0.167352],
[-1.40525266, 1.37652443, -0.8519666],
]
)
assert_array_almost_equal(samples, samples_expected)
def test_NormalQMCShapiro(self):
seed = np.random.RandomState(12345)
engine = qmc.MultivariateNormalQMC(mean=np.zeros(2), seed=seed)
samples = engine.random(n=256)
assert_(all(np.abs(samples.mean(axis=0)) < 1e-2))
assert_(all(np.abs(samples.std(axis=0) - 1) < 1e-2))
# perform Shapiro-Wilk test for normality
for i in (0, 1):
_, pval = shapiro(samples[:, i])
assert_(pval > 0.9)
# make sure samples are uncorrelated
cov = np.cov(samples.transpose())
assert_(np.abs(cov[0, 1]) < 1e-2)
def test_NormalQMCShapiroInvTransform(self):
seed = np.random.RandomState(12345)
engine = qmc.MultivariateNormalQMC(
mean=np.zeros(2), seed=seed, inv_transform=True)
samples = engine.random(n=256)
assert_(all(np.abs(samples.mean(axis=0)) < 1e-2))
assert_(all(np.abs(samples.std(axis=0) - 1) < 1e-2))
# perform Shapiro-Wilk test for normality
for i in (0, 1):
_, pval = shapiro(samples[:, i])
assert_(pval > 0.9)
# make sure samples are uncorrelated
cov = np.cov(samples.transpose())
assert_(np.abs(cov[0, 1]) < 1e-2)
def test_reset(self):
seed = np.random.RandomState(12345)
engine = qmc.MultivariateNormalQMC(mean=np.zeros(1), seed=seed)
samples = engine.random(2)
engine.reset()
samples_reset = engine.random(2)
assert_array_equal(samples, samples_reset)
class TestMultivariateNormalQMC:
def test_MultivariateNormalQMCNonPSD(self):
# try with non-psd, non-pd cov and expect an assertion error
with pytest.raises(ValueError, match=r"Covariance matrix not PSD."):
seed = np.random.RandomState(123456)
qmc.MultivariateNormalQMC([0, 0], [[1, 2], [2, 1]], seed=seed)
def test_MultivariateNormalQMCNonPD(self):
# try with non-pd but psd cov; should work
seed = np.random.RandomState(123456)
engine = qmc.MultivariateNormalQMC(
[0, 0, 0], [[1, 0, 1], [0, 1, 1], [1, 1, 2]],
seed=seed
)
assert_(engine._corr_matrix is not None)
def test_MultivariateNormalQMCSymmetric(self):
# try with non-symmetric cov and expect an error
with pytest.raises(ValueError, match=r"Covariance matrix is not "
r"symmetric."):
seed = np.random.RandomState(123456)
qmc.MultivariateNormalQMC([0, 0], [[1, 0], [2, 1]], seed=seed)
def test_MultivariateNormalQMCDim(self):
# incompatible dimension of mean/cov
with pytest.raises(ValueError, match=r"Dimension mismatch between "
r"mean and covariance."):
seed = np.random.RandomState(123456)
qmc.MultivariateNormalQMC([0], [[1, 0], [0, 1]], seed=seed)
def test_MultivariateNormalQMC(self):
# d = 1 scalar
seed = np.random.RandomState(123456)
engine = qmc.MultivariateNormalQMC(mean=0, cov=5, seed=seed)
samples = engine.random()
assert_equal(samples.shape, (1, 1))
samples = engine.random(n=5)
assert_equal(samples.shape, (5, 1))
# d = 2 list
engine = qmc.MultivariateNormalQMC(mean=[0, 1], cov=[[1, 0], [0, 1]],
seed=seed)
samples = engine.random()
assert_equal(samples.shape, (1, 2))
samples = engine.random(n=5)
assert_equal(samples.shape, (5, 2))
# d = 3 np.array
mean = np.array([0, 1, 2])
cov = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
engine = qmc.MultivariateNormalQMC(mean, cov, seed=seed)
samples = engine.random()
assert_equal(samples.shape, (1, 3))
samples = engine.random(n=5)
assert_equal(samples.shape, (5, 3))
def test_MultivariateNormalQMCInvTransform(self):
# d = 1 scalar
seed = np.random.RandomState(123456)
engine = qmc.MultivariateNormalQMC(mean=0, cov=5, inv_transform=True,
seed=seed)
samples = engine.random()
assert_equal(samples.shape, (1, 1))
samples = engine.random(n=5)
assert_equal(samples.shape, (5, 1))
# d = 2 list
engine = qmc.MultivariateNormalQMC(
mean=[0, 1], cov=[[1, 0], [0, 1]], inv_transform=True,
seed=seed
)
samples = engine.random()
assert_equal(samples.shape, (1, 2))
samples = engine.random(n=5)
assert_equal(samples.shape, (5, 2))
# d = 3 np.array
mean = np.array([0, 1, 2])
cov = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
engine = qmc.MultivariateNormalQMC(mean, cov, inv_transform=True,
seed=seed)
samples = engine.random()
assert_equal(samples.shape, (1, 3))
samples = engine.random(n=5)
assert_equal(samples.shape, (5, 3))
def test_MultivariateNormalQMCSeeded(self):
# test even dimension
seed = np.random.RandomState(12345)
np.random.seed(54321)
a = np.random.randn(2, 2)
A = a @ a.transpose() + np.diag(np.random.rand(2))
engine = qmc.MultivariateNormalQMC(np.array([0, 0]), A,
inv_transform=False, seed=seed)
samples = engine.random(n=2)
samples_expected = np.array(
[[-1.010703, -0.324223], [-0.67595995, -2.27437872]]
)
assert_array_almost_equal(samples, samples_expected)
# test odd dimension
seed = np.random.RandomState(12345)
np.random.seed(54321)
        a = np.random.randn(3, 3)
"""Wrapper for semi-empirical QM energies with XTB.
"""
__all__ = ["XTBEnergy", "XTBBridge"]
import warnings
import torch
import numpy as np
from .base import _BridgeEnergy, _Bridge
class XTBBridge(_Bridge):
"""Wrapper around XTB for semi-empirical QM energy calculations.
Parameters
----------
numbers : np.ndarray
Atomic numbers
temperature : float
Temperature in Kelvin.
method : str
The semi-empirical method that is used to compute energies.
solvent : str
The solvent. If empty string, perform a vacuum calculation.
verbosity : int
0 (muted), 1 (minimal), 2 (full)
err_handling : str
How to deal with exceptions inside XTB. One of `["ignore", "warning", "error"]`
Attributes
----------
n_atoms : int
The number of atoms in this molecules.
available_solvents : List[str]
The solvent models that are available for computations in xtb.
available_methods : List[str]
The semiempirical methods that are available for computations in xtb.
Examples
--------
Setting up an XTB energy for a small peptide from bgmol
>>> from bgmol.systems import MiniPeptide
>>> from bgflow import XTBEnergy, XTBBridge
>>> import numpy as np
>>> import torch
>>> system = MiniPeptide("G")
>>> numbers = np.array([atom.element.number for atom in system.mdtraj_topology.atoms])
>>> target = XTBEnergy(XTBBridge(numbers=numbers, temperature=300, solvent="water"))
>>> xyz = torch.tensor(system.positions)
>>> energy = target.energy(xyz)
Notes
-----
Requires the xtb-python program (installable with `conda install -c conda-forge xtb-python`).
"""
def __init__(
self,
numbers: np.ndarray,
temperature: float,
method: str = "GFN2-xTB",
solvent: str = "",
verbosity: int = 0,
err_handling: str = "warning"
):
self.numbers = numbers
self.temperature = temperature
self.method = method
self.solvent = solvent
self.verbosity = verbosity
self.err_handling = err_handling
super().__init__()
@property
def n_atoms(self):
return len(self.numbers)
@property
def available_solvents(self):
from xtb.utils import _solvents
return list(_solvents.keys())
@property
def available_methods(self):
from xtb.utils import _methods
return list(_methods.keys())
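    # Example (requires xtb-python; the atomic numbers below are just a water molecule):
    # XTBBridge(numbers=np.array([8, 1, 1]), temperature=300.0).available_solvents
    # returns the solvent names accepted by the `solvent` argument.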
def _evaluate_single(
self,
positions: torch.Tensor,
evaluate_force=True,
evaluate_energy=True,
):
from xtb.interface import Calculator, XTBException
from xtb.utils import get_method, get_solvent
positions = _nm2bohr(positions)
energy, force = None, None
try:
calc = Calculator(get_method(self.method), self.numbers, positions)
calc.set_solvent(get_solvent(self.solvent))
calc.set_verbosity(self.verbosity)
calc.set_electronic_temperature(self.temperature)
try:
res = calc.singlepoint()
except XTBException:
# Try with higher temperature
calc.set_electronic_temperature(10 * self.temperature)
res = calc.singlepoint()
calc.set_electronic_temperature(self.temperature)
res = calc.singlepoint(res)
if evaluate_energy:
energy = _hartree2kbt(res.get_energy(), self.temperature)
if evaluate_force:
force = _hartree_per_bohr2kbt_per_nm(
-res.get_gradient(),
self.temperature
)
assert not np.isnan(energy)
assert not np.isnan(force).any()
except XTBException as e:
if self.err_handling == "error":
raise e
elif self.err_handling == "warning":
warnings.warn(
f"Caught exception in xtb. "
f"Returning infinite energy and zero force. "
f"Original exception: {e}"
)
force = np.zeros_like(positions)
energy = np.infty
elif self.err_handling == "ignore":
force = np.zeros_like(positions)
energy = np.infty
except AssertionError:
            # mirror the exception path above: zero out NaN forces, flag with infinite energy
            force[np.isnan(force)] = 0.0
            energy = np.infty
        return energy, force
import os
import unittest
import tempfile
import shutil
import numpy as np
import darr
from darr.array import asarray, create_array, create_datadir, Array, \
numtypesdescr, truncate_array, delete_array, AppendDataError, \
numtypedescriptiontxt
from darr.utils import tempdir, tempdirfile
# TODO clean up overwrite parameters, not necessary anymore
class DarrTestCase(unittest.TestCase):
def assertArrayIdentical(self, x, y):
self.assertEqual(x.dtype, y.dtype)
self.assertEqual(x.shape, y.shape)
self.assertEqual(np.sum((x-y)**2), 0)
class AsArray(DarrTestCase):
def setUp(self):
self.tempdirname1 = tempfile.mkdtemp()
self.tempdirname2 = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tempdirname1)
shutil.rmtree(self.tempdirname2)
def check_arrayequaltoasarray(self, ndarray):
"""Tests if asarray creates an array of same shape and dtype and same
contents as input."""
dar = asarray(path=self.tempdirname1, array=ndarray, overwrite=True)
ndarray = np.asarray(ndarray) # could be list or tuple
self.assertArrayIdentical(dar[:], ndarray)
self.assertEqual(dar.dtype, ndarray.dtype)
self.assertEqual(dar.shape, ndarray.shape)
def test_asarraynumberint(self):
dar = asarray(path=self.tempdirname1, array=1, overwrite=True)
self.assertEqual(dar[0], 1)
def test_asarraynumberfloat(self):
dar = asarray(path=self.tempdirname1, array=1.0, overwrite=True)
self.assertEqual(dar[0], 1.0)
def test_asarrayonedimensionalndarray(self):
ndarray = np.arange(24)
self.check_arrayequaltoasarray(ndarray)
def test_asarraytwodimensionalndarray(self):
ndarray = np.arange(24).reshape(12, 2)
self.check_arrayequaltoasarray(ndarray)
def test_asarraythreedimensionalndarray(self):
ndarray = np.arange(24).reshape(4, 2, 3)
self.check_arrayequaltoasarray(ndarray)
def test_asarrayonedimensionallist(self):
ndarray = [1, 2, 3, 4]
self.check_arrayequaltoasarray(ndarray)
def test_asarraytwodimensionallist(self):
ndarray = [[1, 2, 3, 4],
[1, 2, 3, 4]]
self.check_arrayequaltoasarray(ndarray)
def test_asarraythreedimensionallist(self):
ndarray = [[[1, 2, 3, 4],
[1, 2, 3, 4]],
[[1, 2, 3, 4],
[1, 2, 3, 4]]]
self.check_arrayequaltoasarray(ndarray)
def test_asarraynumericdtypes(self):
dtypes = numtypesdescr.keys()
for dtype in dtypes:
with self.subTest(dtype=dtype):
ndarray = np.arange(24, dtype=dtype)
self.check_arrayequaltoasarray(ndarray)
def test_asarrayfortranorder(self):
ndarray = np.asarray(np.arange(24, dtype='float64'), order='F')
self.check_arrayequaltoasarray(ndarray)
def test_asarraycorder(self):
ndarray = np.asarray(np.arange(24, dtype='float64'), order='C')
self.check_arrayequaltoasarray(ndarray)
def test_asarraylittleendian(self):
ndarray = np.arange(24, dtype='<f4')
self.check_arrayequaltoasarray(ndarray)
def test_asarraybigendian(self):
ndarray = np.arange(24, dtype='>f4')
self.check_arrayequaltoasarray(ndarray)
def test_asarrayoverwrite(self):
a = np.zeros((5,), dtype='float64')
_ = asarray(path=self.tempdirname1, array=a, overwrite=True)
b = np.ones((4,2), dtype='uint8')
dar = asarray(path=self.tempdirname1, array=b, overwrite=True)
self.assertArrayIdentical(dar[:], b)
def test_asarraysequencesmallchunklen(self):
a = [1, 2, 3, 4, 5]
dar = asarray(path=self.tempdirname1, array=a, chunklen=3,
overwrite=True)
self.assertArrayIdentical(np.array(a), dar[:])
def test_asarraywritingsmallerchunks(self):
a = np.arange(1024, dtype='int64').reshape(2,-1)
dar = asarray(path=self.tempdirname1, array=a, chunklen=4, overwrite=True)
self.assertArrayIdentical(a, dar[:])
dar = asarray(path=self.tempdirname1, array=a, chunklen=5, overwrite=True)
self.assertArrayIdentical(a, dar[:])
def test_asarraywritinglargerthanlenchunks(self):
a = np.arange(1024, dtype='int64').reshape(2, -1)
dar = asarray(path=self.tempdirname1, array=a, chunklen=4096, overwrite=True)
self.assertArrayIdentical(a, dar[:])
def test_asarrayarray(self):
a = | np.arange(1024, dtype='int64') | numpy.arange |
#!/usr/bin/env python
"""
The script converts the .dat files from afphot to .nc files for M2 pipeline.
Before running this script, afphot should be ran (usually in muscat-abc)
and its results copied to /ut2/muscat/reduction/muscat/DATE.
To convert .dat to .nc, this script does the following.
1. read the .dat files in /ut2/muscat/reduction/muscat/DATE/TARGET_N/PHOTDIR/radXX.0
where
DATE: observation date (e.g. 191029)
TARGET_N: e.g. TOI516_0, TOI516_1, TOI516_2 for g-,r-,z-band produced by afphot
PHOTDIR: either apphot_mapping or apphot_centroid
radXX.0: radius containing .dat files
2. convert JD to BJD_TDB, although M2 pipeline uses MJD_TDB
3. construct xarrays assuming:
fwhm as proxy to object entropy (eobj)
sky as proxy to sky median (msky)
sky_sdev as proxy to sky entropy (esky)
4. save the xarray dataset into .nc files for each band
"""
import os
import re
from glob import glob
import pandas as pd
from astropy.time import Time
from tqdm import tqdm
import numpy as np
from astropy.io import fits
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.coordinates import EarthLocation
import xarray as xa
import matplotlib.pyplot as pl
from astroplan.plots import plot_finder_image
from astropy.visualization import ZScaleInterval
interval = ZScaleInterval(contrast=0.5)
from muscat2ph.phdata import PhotometryData
# import sys
# sys.path.append('/home/muscat/muscat2/')
# from toi_functions import get_toi
#http://www.oao.nao.ac.jp/en/telescope/abouttel188/
oao = EarthLocation.from_geodetic(lat='34:34:37.47', lon='133:35:38.24', height=372*u.m)
muscat_fov = 6.8 #arcmin in diagonal
fov_rad = muscat_fov*u.arcmin
interval = ZScaleInterval()
def binned(a, binsize, fun=np.mean):
a_b = []
for i in range(0, a.shape[0], binsize):
a_b.append(fun(a[i:i+binsize], axis=0))
return a_b
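# quick illustration of binned() above (values computed here, not pipeline data):
#   binned(np.arange(6), 2) -> [0.5, 2.5, 4.5], i.e. the mean of each consecutive pair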
class DatReader:
def __init__(self, obsdate, objname, objcoord, bands=['g','r','z_s'], nstars=None,
ref_frame=0, ref_band='r',
photdir='apphot_mapping', datadir= '/ut2/muscat/reduction/muscat',
verbose=True, overwrite=False):
"""initialize
"""
if 'z' in bands:
raise ValueError('use z_s instead of z')
self.obsdate = obsdate
self.objname = objname
self.bands = bands
self.ref_band = ref_band
self.ref_frame = ref_frame
self.nstars = nstars
self.photdir = photdir
self.datadir = datadir
self.objcoord = self._get_obj_coord(objcoord)
self.paths = self._get_paths()
self.airmasses = None
self.exptimes = None
self.data = self._load_dat_files()
self.radii = {band: sorted(self.data[band].keys()) for band in self.bands}
self.jds = {band: sorted(self.data[band][self.radii[band][0]].keys()) for band in self.bands}
self.mjds = None
self.bjds = None #tdb
self.use_barycorrpy = False
self._convert_to_bjd_tdb() #populate mjds and bjds attributes
self.verbose = verbose
self.overwrite = overwrite
def _get_obj_coord(self, objcoord):
"""Define coord used in bjd_tdb conversion
"""
objcoord = SkyCoord(ra=objcoord[0], dec=objcoord[1], unit='deg')
return objcoord
def _get_paths(self):
"""get path to each data directory per band
"""
paths = {}
nradii = {}
loc = f'{self.datadir}/{self.obsdate}'
if not os.path.exists(loc):
raise FileNotFoundError(f'afphot files not found in {loc}')
for n,band in enumerate(self.bands):
path = f'{loc}/{self.objname}_{n}/{self.photdir}'
radius_dirs = glob(path+'/rad*')
errmsg = f'{path} is empty'
assert len(radius_dirs)>0, errmsg
paths[band] = radius_dirs
nradii[band] = (len(radius_dirs))
errmsg = f'nradii: {nradii} have unequal number of radius directories'
assert len(set(nradii.values()))==1, errmsg
return paths
def _load_dat_files(self):
"""get data per band per aperture radius per cadence;
aperture radius is parsed from the directory produced by afphot
Note: aperture radius in afphot is chosen arbitrarily,
whereas M2 pipeline uses 9 radii: (4,8,12,16,20,25,30,40,50) pix
TODO: when a .dat file is corrupted, it is better to populate
the entry with a dataframe of null/NaN values; currently it is
simpler to omit/skip using the entire radius directory
"""
data = {}
exptimes = {}
airmasses = {}
for band in tqdm(self.bands, desc='reading .dat files'):
radius_dirs = self.paths[band]
apertures = {}
for radius_dir in radius_dirs:
#parse radius from directory name
radius = float(radius_dir.split('/')[-1][3:])
#get dat files inside aperture radius directory
dat_files = glob(radius_dir+'/*')
dat_files.sort()
#specify column names based written in .dat file
column_names = 'ID xcen ycen nflux flux err sky sky_sdev SNR nbadpix fwhm peak'.split()
cadences = {}
exptime = []
airmass = []
nrows, ncols = [], []
for i,dat_file in enumerate(dat_files):
try:
#parse lines 0, 18, 20 which contain gjd, exptime, and airmass
d = pd.read_csv(dat_file, header=None)
time = float(d.iloc[0].str.split('=').values[0][1]) #gjd - 2450000
time+=2450000
exptime.append(float(d.iloc[18].str.split('=').values[0][1]))
airmass.append(float(d.iloc[20].str.split('=').values[0][1]))
except Exception as e:
#some afphot dat files may be corrupted
errmsg = f'{dat_file} seems corrupted.\n\n'
errmsg+='You can temporarily delete the radius directory in each band:\n'
for n,_ in enumerate(self.bands):
p = f'{self.datadir}/{self.obsdate}/{self.objname}_{n}/{self.photdir}/rad{radius}\n'
errmsg+=f'$ rm -rf {p}'
raise IOError(errmsg)
# parse succeeding lines as dataframe
d = pd.read_csv(dat_file, delim_whitespace=True, comment='#', names=column_names)
nrows.append(d.shape[0])
ncols.append(d.shape[1])
# in cases when data is bad, the number of stars detected
# in some frames is less than nstars used in afphot;
if (self.nstars is not None) and self.nstars < len(d):
# trim to fewer stars
d = d.iloc[:self.nstars]
# check if each .dat file has same shape as the rest
nrow = int(np.median(nrows))
ncol = int(np.median(ncols))
if i>1:
errmsg =f'{dat_file} has shape {d.shape} instead of {(nrow,ncol)}\n\n'
errmsg+=f'You can set nstars<={d.shape[0]}, or\n\n'
errmsg+='You can temporarily delete the radius directory in each band:\n'
for n,_ in enumerate(self.bands):
p = f'{self.datadir}/{self.obsdate}/{self.objname}_{n}/{self.photdir}/rad{radius}\n'
errmsg+=f'$ rm -rf {p}'
assert (nrow,ncol)==d.shape, errmsg
assert len(d)>0, f'{dat_file} seems empty'
cadences[time]=d
#save each data frame corresponding to each cadence/exposure sequence
assert len(cadences)>0, f'{cadences} seems empty'
apertures[radius] = cadences
assert len(apertures)>0, f'{apertures} seems empty'
data[band] = apertures
airmasses[band] = airmass #not band-dependent but differs in length per band
exptimes[band] = exptime
#set attributes
self.exptimes = exptimes
self.airmasses = airmasses
return data
def _convert_to_bjd_tdb(self):
"""convert jd to bjd format and tdb time scale
"""
mjds = {}
bjds = {}
for band in self.bands:
radius = self.radii[band][0] #any radius will do
d = self.data[band][radius] #because time per radius is identical
jd = Time(list(d.keys()), format='jd', scale='utc', location=oao)
#mjd time format
mjds[band] = jd.mjd
if self.use_barycorrpy:
#https://arxiv.org/pdf/1801.01634.pdf
try:
from barycorrpy import utc_tdb
except:
raise ImportError("pip install barycorrpy")
#convert jd to bjd_tdb
result = utc_tdb.JDUTC_to_BJDTDB(jd,
ra=self.objcoord.ra.deg,
dec=self.objcoord.dec.deg,
lat=oao.lat.deg,
longi=oao.lon.deg,
alt=oao.height.value)
bjds[band] = result[0]
else:
#BJD time format in TDB time scale
bjds[band] = (jd.tdb + jd.light_travel_time(self.objcoord)).value
#check difference between two time scales (should be < 8 mins!)
diff=bjds[band]-2400000.5-mjds[band]
diff_in_minutes = np.median(diff)*24*60
assert diff_in_minutes < 8.4, f'{band}: {diff_in_minutes:.2} min'
self.mjds = mjds
self.bjds = bjds
#return mjds, bjds
def show_ref_table(self):
"""return dat file table corresponding to ref_frame
"""
radius = self.radii[self.ref_band][0] #any radius will do
jds = self.jds[self.ref_band]
df = self.data[self.ref_band][radius][jds[self.ref_frame]]
return df
def create_cpix_xarray(self, dummy_value=None):
"""pixel centroids of each star in a given reference frame and band
Note that cpix is the same for all bands
"""
ref_jd = self.jds[self.ref_band][self.ref_frame]
radius = self.radii[self.ref_band][0] #any radius will do
d = self.data[self.ref_band][radius][ref_jd]
if dummy_value is None:
cen = d[['xcen','ycen']]
else:
cen = np.full(d[['xcen','ycen']].shape, dummy_value)
cpix = xa.DataArray(cen,
name='centroids_pix',
dims='star centroid_pix'.split(),
coords={'centroid_pix': ['x', 'y'],
'star': d['ID']})
return cpix
def create_csky_xarray(self):
"""just place-holder for sky centroids since not available in afphot
(or as if astrometry.net failed in M2 pipeline)
"""
cpix = self.create_cpix_xarray()
ca = np.full_like(np.array(cpix), np.nan)
csky = xa.DataArray(ca,
name='centroids_sky',
dims='star centroid_sky'.split(),
coords={'centroid_sky': ['ra', 'dec'],
'star': cpix.star.data})
return csky
def create_flux_xarray(self, band, dummy_value=None):
"""flux with shape (time,napertures,nstars)
"""
d = self.data[band]
r = self.radii[band][0]
jd = self.jds[band][0]
stars = d[r][jd]['ID'].values
nstars = len(stars)
apers = self.radii[band]
napers = len(apers)
ncadences = len(self.jds[band])
# populate
if dummy_value is None:
fluxes = np.zeros((ncadences,napers,nstars))
for n,jd in enumerate(self.jds[band]):
for m,r in enumerate(self.radii[band]):
fluxes[n,m] = d[r][jd]['flux'].values
else:
fluxes = np.full((ncadences,napers,nstars), dummy_value)
#reorder axes from (cadence, aperture, star) to (cadence, star, aperture)
fluxes = np.swapaxes(fluxes, 1, 2)
# fluxes.shape
# construct
flux = xa.DataArray(fluxes,
name='flux',
dims='mjd star aperture'.split(),
coords={'mjd': self.mjds[band],
'aperture': apers,
'star': stars
})
return flux
def create_eobj_xarray(self, band, dummy_value=None):
"""fwhm as proxy to object entropy (eobj)
Note that entropy e in M2 pipeline is defined as:
z = (a - a.min() + 1e-10)/ a.sum()
e = -(z*log(z)).sum()
"""
d = self.data[band]
r = self.radii[band][0] #any radius will do
jd = self.jds[band][0]
stars = d[r][jd]['ID'].values
nstars = len(stars)
apers = self.radii[band]
napers = len(apers)
ncadences = len(self.jds[band])
# populate
if dummy_value is None:
eobjs = np.zeros((ncadences,napers,nstars))
for n,jd in enumerate(self.jds[band]):
for m,r in enumerate(self.radii[band]):
eobjs[n,m] = d[r][jd]['fwhm'].values
else:
eobjs = np.full((ncadences,napers,nstars), dummy_value)
#reorder axes from (cadence, aperture, star) to (cadence, star, aperture)
eobjs = np.swapaxes(eobjs, 1, 2)
# construct
eobj = xa.DataArray(eobjs,
name='eobj',
dims='mjd star aperture'.split(),
coords={'mjd': self.mjds[band],
'aperture': apers,
'star': stars
})
return eobj
def create_msky_xarray(self, band, dummy_value=None):
"""sky as proxy to sky median (msky)
"""
d = self.data[band]
r = self.radii[band][0]
jd = self.jds[band][0]
stars = d[r][jd]['ID'].values
nstars = len(stars)
cadences = self.jds[band]
ncadences = len(cadences)
# populate
if dummy_value is None:
mskys = np.zeros((ncadences,nstars))
for n,jd in enumerate(cadences):
for m,star in enumerate(stars):
mskys[n,m] = d[r][jd].iloc[m]['sky']
else:
mskys = np.full((ncadences,nstars), dummy_value)
# construct
msky = xa.DataArray(mskys,
name='msky',
dims='mjd star'.split(),
coords={'mjd': self.mjds[band],
'star': stars
})
return msky
def create_esky_xarray(self, band, dummy_value=None):
"""sky_sdev as proxy to sky entropy (esky)
"""
d = self.data[band]
r = self.radii[band][0]
jd = self.jds[band][0]
stars = d[r][jd]['ID'].values
nstars = len(stars)
cadences = self.jds[band]
ncadences = len(cadences)
# populate
if dummy_value is None:
eskys = np.zeros((ncadences,nstars))
for n,jd in enumerate(cadences):
for m,star in enumerate(stars):
eskys[n,m] = d[r][jd].iloc[m]['sky_sdev']
else:
eskys = | np.full((ncadences,nstars), dummy_value) | numpy.full |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 20 2020
@author: <NAME> <EMAIL>
@author: <NAME> <EMAIL>
"""
from scipy.integrate import solve_ivp, odeint
from scipy import optimize
from scipy.integrate import odeint
from scipy.optimize import least_squares
import numpy as np
import warnings
warnings.filterwarnings('ignore')
class compartimental_models:
def __init__(self):
pass
def r0(self):
return self.beta/self.gamma
#define least square errors
def sir_least_squares_error_ode(self,par, time_exp, f_exp, fitting_model, initial_conditions):
args = par
time = (time_exp.min(), time_exp.max())
y_model = fitting_model(initial_conditions, time, *args)
simulated_time = y_model.t
simulated_ode_solution = y_model.y
_, simulated_qoi, _ = simulated_ode_solution
residual = f_exp - simulated_qoi
return | np.sum(residual ** 2.0) | numpy.sum |
from collections import namedtuple
import matplotlib.pyplot as plt
import numpy as np
from numpy import sum as npsum
from numpy import zeros, diag, eye, sqrt, tile
from numpy.linalg import solve, cholesky
from numpy.random import rand
from scipy.stats import chi2, f
plt.style.use('seaborn')
from DimRedScenariosNormal import DimRedScenariosNormal
from NormalScenarios import NormalScenarios
def Tscenarios(nu, mu, sig2, j_, optionT=None, method='Riccati', d=None):
# This function generates student t simulations whose
# moments match the theoretical moments mu_, nu/(nu-2)@sigma2_, either from
# radial or stochastic representation and through dimension reduction.
# INPUTS
# nu : [scalar] degrees of freedom
# mu : [vector] (n_ x 1) vector of means
# sigma2 : [matrix] (n_ x n_) dispersion matrix
# j_ : [scalar] (even) number of simulations
# optionT : [struct] with fields (default values are 0 for both fields)
# optionT.dim_red : [scalar] number of factors to be used for normal
# scenario generation with dimension reduction. If it is set to 0, normal
# scenarios are generated without dimension reduction.
# optionT.stoc_rep : [scalar] Set it to 1 to generate t scenarios through
# stochastic representation via normal and chi-square scenarios.
# method : [string] Riccati (default), CPCA, PCA, LDL-Cholesky,
# Gram-Schmidt, Chol
# d : [matrix] (k_ x n_) full rank constraints matrix for CPCA
# OUTPUTS
# X : [matrix] (n_ x j_) matrix of scenarios drawn from a
# Student t distribution t(nu,mu,sig2).
#
#
# NOTE: Always use a large number of simulations j_ >> n_ to ensure that
# NormalScenarios works properly. We also recommend a low number of
# factors k_ << n_
# For details on the exercise, see here .
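# Usage sketch (hypothetical inputs, not taken from the exercise): draw j_=10000
# scenarios from a bivariate t with nu=5, zero means and identity dispersion:
#   mu = zeros((2, 1)); sig2 = eye(2); X = Tscenarios(5, mu, sig2, 10000)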
## Code
if optionT is None:
optionT = namedtuple('option', ['dim_red', 'stoc_rep'])
optionT.dim_red = 0
optionT.stoc_rep = 0
n_ = len(mu)
k_ = optionT.dim_red
if optionT.stoc_rep == 0:
# Step 1. Radial scenarios
R = sqrt(n_ * f.ppf(rand(j_, 1), n_, nu))
# Step 2. Correlation
rho2 = np.diagflat(diag(sig2) ** (-1 / 2)) @ sig2 @ np.diagflat(diag(sig2) ** (-1 / 2))
# Step 3. Normal scenarios
if optionT.dim_red > 0:
N, beta = DimRedScenariosNormal(zeros((n_, 1)), rho2, k_, j_, method, d)
else:
N, _ = NormalScenarios(zeros((n_, 1)), rho2, j_, method, d)
# Step 4. Inverse
if optionT.dim_red > 0:
delta2 = diag(eye(n_) - beta @ beta.T)
omega2 = np.diagflat(1 / delta2)
rho2_inv = omega2 - omega2 @ beta / (beta.T @ omega2 @ beta + eye((k_))) @ beta.T @ omega2
else:
rho2_inv = solve(rho2, eye(rho2.shape[0]))
# Step 5. Cholesky
rho_inv = cholesky(rho2_inv)
# Step 6. Normalizer
M = sqrt(npsum((rho_inv @ N) ** 2, axis=0))
# Step 7. Output
if optionT.stoc_rep == 0:
# Elliptical representation
X = tile(mu, (1, j_)) + np.diagflat(sqrt(diag(sig2))) @ N @ np.diagflat(1 / M) @ np.diagflat(R)
else:
# Stochastic representation
v = chi2.ppf(rand(j_, 1), nu) / nu
X = tile(mu, (1, j_)) + np.diagflat(sqrt( | diag(sig2) | numpy.diag |
import numpy as np
from pyfibre.model.tools.analysis import (
tensor_analysis, angle_analysis
)
from pyfibre.tests.pyfibre_test_case import PyFibreTestCase
class TestAnalysis(PyFibreTestCase):
def setUp(self):
self.tensor = np.ones((3, 3, 2, 2))
self.tensor[0, 0, 0, 1] = 2
self.tensor[0, 0, 1, 0] = 2
self.tensor[1, 1, 1] *= 4
def test_tensor_analysis(self):
tensor = np.array([[1, 0],
[0, 1]])
tot_coher, tot_angle, tot_energy = tensor_analysis(tensor)
self.assertArrayAlmostEqual(np.array([0]), tot_coher)
self.assertArrayAlmostEqual(np.array([0]), tot_angle)
self.assertArrayAlmostEqual(np.array([2]), tot_energy)
tensor = np.array([[1, 0],
[0, -1]])
tot_coher, tot_angle, tot_energy = tensor_analysis(tensor)
self.assertArrayAlmostEqual(np.array([0]), tot_coher)
self.assertArrayAlmostEqual(np.array([90]), tot_angle)
self.assertArrayAlmostEqual(np.array([2]), tot_energy)
tensor = np.array([[1, 0],
[0, 0]])
tot_coher, tot_angle, tot_energy = tensor_analysis(tensor)
self.assertArrayAlmostEqual( | np.array([1]) | numpy.array |
import numpy as np
def get_epipole(F):
'''
The epipole is the eigenvector associated with the smallest eigenvalue of F
'''
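    # background: for a rank-2 fundamental matrix F the right epipole e satisfies
    # F @ e = 0, i.e. e spans the (right) null space of F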
evalue, evector = np.linalg.eig(F) # normalized evector
index = np.argmin(evalue)
epipole = evector[:, index]
return epipole
def get_rotation_axis(d):
# d_i to make image plane parallel
# intersection line
axis = np.array([-d[1], d[0], 0])
return axis
def get_angle(epipole, axis):
return np.arctan(epipole[2] / (axis[1] * epipole[0] - axis[0] * epipole[1]))
def get_plane_rotation_matrix(axis, angle):
cos_angle = | np.cos(angle) | numpy.cos |
#!/usr/bin/python3
import numpy,hashlib,appdirs,os.path,os
from .. import helpers
import re
cacheDir = appdirs.user_cache_dir('XpyY',appauthor='<NAME>')
try: os.mkdir(cacheDir)
except FileExistsError: pass
digitre=re.compile(rb'\d')
def parse(infile):
''' returns an array of the given LVM file's data;
caches data for big files (>1 MB)'''
try: infile.read
except AttributeError: infile = open(infile,'rb')
try: fsize = os.stat(infile.fileno()).st_size
except OSError: pass
else:
if fsize > 1e6:
dataHash = helpers.hash(infile)
cache = os.path.join(cacheDir,dataHash.hexdigest())
try:
return numpy.load(cache)
except FileNotFoundError:
print('uncached')
pass
# will not be reached if loaded from cache
infile.seek(0)
chanCount=0
for line in infile:
try: chanCount = int(re.search(b'Channels\t(\d+)',line).group(1))
except AttributeError: chanCount = max((chanCount, len(line.split(b'\t'))-1))
else: break
infile.seek(0) # we may have consumed the whole infile above while determining chanCount
result = []
for line in infile:
if digitre.match(line):
result.extend(map(float,line.split(b'\t')))
result = | numpy.array(result) | numpy.array |
import os
import sys
import six
import time
import math
import socket
import contextlib
import numpy as np
from paddle import fluid
from paddle.io import BatchSampler
from paddle.fluid.layers import collective
from paddle.distributed import ParallelEnv
from paddle.fluid.dygraph.parallel import ParallelStrategy
_parallel_context_initialized = False
class DistributedBatchSampler(BatchSampler):
def __init__(self, dataset, batch_size, shuffle=False, drop_last=False):
self.dataset = dataset
assert isinstance(batch_size, int) and batch_size > 0, \
"batch_size should be a positive integer"
self.batch_size = batch_size
assert isinstance(shuffle, bool), \
"shuffle should be a boolean value"
self.shuffle = shuffle
assert isinstance(drop_last, bool), \
"drop_last should be a boolean number"
self.drop_last = drop_last
self.nranks = ParallelEnv().nranks
self.local_rank = ParallelEnv().local_rank
self.epoch = 0
self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.nranks))
self.total_size = self.num_samples * self.nranks
def __iter__(self):
num_samples = len(self.dataset)
indices = np.arange(num_samples).tolist()
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
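# worked example of the padding above (hypothetical sizes): with 10 samples on
# 4 ranks, num_samples = ceil(10/4) = 3 and total_size = 12, so the first two
# indices are repeated before the list is sharded across ranks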
if self.shuffle:
| np.random.RandomState(self.epoch) | numpy.random.RandomState |
import os
import pickle
import _pickle as cPickle
import numpy as np
import math
from PIL import Image
def parse_anno(filename):
objects = []
f=open(filename,'r')
line = f.readline()
while line:
line=line.split(' ')
obj_struct = {}
name=line[0]
cen_x = line[1]
cen_y = line[2]
obj_struct['centroid']=[cen_x,cen_y]
name_file=name
obj_struct['imagename'] = [name_file]
obj_struct['det'] = False
obj_struct['det_name']=''
objects.append(obj_struct)
line = f.readline()
f.close()
imagesetfile = 'train.txt'
with open(imagesetfile, 'r') as f:
lines = f.readlines()
imagenames = [x.strip() for x in lines]
class_recs = {}
npos = 0
for name in imagenames:
imagename=name
npos=npos+1
class_recs[imagename]= [obj for obj in objects if obj['imagename'][0]==imagename]
print (npos)
return class_recs
if __name__=='__main__':
dithresh = 32
width = 48
# read the gt
test_anno_file = 'anno_train_14.txt'
class_recs = parse_anno(test_anno_file)
# read segmentation
with open('output/segmentation.txt','r') as f:
lines = f.readlines()
splitlines = [x.strip().split() for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
BB = np.array([[float(z) for z in x[2:]] for x in splitlines])
#sort by confidence
sorted_ind = np.argsort(-confidence)
sorted_scores = np.sort(-confidence)
BB=BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
nd = len(image_ids) #number of detections
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd): #for every detected bbs
if image_ids[d] not in class_recs:
print (image_ids[d])
else:
R = class_recs[image_ids[d]]
print (image_ids[d])
BBGT =[x['centroid'] for x in R]
BBGT = np.transpose(np.array(BBGT).astype(float))
distmin = np.inf
bb = BB[d, :].astype(float)
bb_x = (bb[0] + bb[2])/2
bb_y = (bb[1] + bb[3])/2
if BBGT.size > 0:
dist = np.sqrt( | np.square(BBGT[0]-bb_x) | numpy.square |
#!/usr/bin/env python
# coding: utf-8
import os
# if MTA database already exists, delete to avoid duplicating data
if os.path.exists('mta_data.db'):
os.remove('mta_data.db')
# get MTA data from same year as satellite image
os.system('python get_mta.py "18"')
# import SQLAlchemy
from sqlalchemy import create_engine, inspect
# import data analysis, plotting packages
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# set default plot params
plt.rcParams['ytick.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 14
plt.rcParams['axes.labelsize'] = 16
plt.rcParams['axes.titlesize'] = 20
plt.rcParams['figure.titlesize'] = 22
plt.rcParams['lines.linewidth'] = 2
plt.rcParams["font.sans-serif"] = 'Tahoma'
def insert_table_from_csv(csv_list, engine):
"""
Adds a .csv as a table in an SQL database
"""
for file in csv_list:
with open(file, 'r') as f:
data = pd.read_csv(f)
data.columns = data.columns.str.strip()
data.to_sql(os.path.splitext(file)[0], con=engine,
index=False, if_exists='replace')
def check_missing_coord(df):
n_missing = max(len(df[np.isnan(df['LAT'])]), len(df[np.isnan(df['LON'])]))
if n_missing != 0:
print(f"\n{n_missing} stations are missing spatial coordinates:")
print(df[np.isnan(df['LAT']) | np.isnan(df['LON'])])
print('\n')
else:
print('\nNo stations are missing spatial coordinates!\n')
# create SQL database engine
mta_data_engine = create_engine("sqlite:///mta_data.db")
# add the geocoded station locations as a table in the MTA database
insert_table_from_csv(['geocoded.csv'], mta_data_engine)
# make sure that the geocoded table was added
insp = inspect(mta_data_engine)
print('\nTables in database:', insp.get_table_names(), '\n')
# rename column 'C/A' in the mta_data table to 'BOOTH' for consistency with the
# geocoded table
pd.read_sql('''
ALTER TABLE mta_data
RENAME COLUMN "C/A" TO "BOOTH";
''',
mta_data_engine);
# join mta_data and geocoded tables on the booth and unit numbers
# select turnstile-level information, include entries and exits, date, time,
# latitude, longitude
# add 'SYSTEM' to differentiate turnstiles in the PATH vs. MTA systems
# -- note: many PATH systems are missing lat/lon coordinates, so will likely
# need to drop them later
# keep only summer (June, July, August) data and 'REGULAR' scheduled audit
# events, not 'RECOVR AUD' entries to avoid duplicates
mta_df_read = pd.read_sql('''
SELECT a.booth, a.unit, a.scp, a.station, a.linename, a.division,
a.date, a.time,
(a.date || ' ' || a.time) AS DATE_TIME,
a.entries, a.exits,
b.lat AS LAT, b.lon AS LON,
(CASE
WHEN b.booth LIKE '%PTH%' THEN 'PATH'
ELSE 'MTA'
END) AS SYSTEM
FROM mta_data a
LEFT JOIN geocoded b
ON a.booth = b.booth AND a.unit = b.unit
WHERE a.date >= '06/01/2018'
AND a.date <= '08/31/2018'
AND a.desc = 'REGULAR';
''',
mta_data_engine)
mta_df = mta_df_read.copy(deep=True)
# convert 'DATE_TIME' column to datetime format
mta_df['DATE_TIME'] = pd.to_datetime(mta_df.DATE_TIME,
format = '%m/%d/%Y %H:%M:%S')
# sort linenames
mta_df['LINENAME'] = mta_df['LINENAME'].apply(lambda x: ''.join(sorted(x)))
mta_df.sort_values('DATE_TIME', ascending=True, inplace=True)
# new columns with the datetime of the previous measurement and the
# entries count recorded at that previous measurement
mta_df[['PREV_DATE_TIME', 'PREV_ENTRIES']] = mta_df.groupby(['BOOTH', 'UNIT', 'SCP', 'STATION'])[['DATE_TIME', 'ENTRIES']].shift(1)
# drop the rows for the first measurement in the df because there is no
# 'PREV_DATE_TIME', and reset the index
mta_df.dropna(subset=['PREV_DATE_TIME'], axis=0, inplace=True)
mta_df.reset_index(drop=True, inplace=True)
# calculate the amount of time (in seconds) between measurements
mta_df['DELTA_DATE_TIME_SEC'] = np.abs(mta_df['DATE_TIME'] - mta_df['PREV_DATE_TIME']) / pd.Timedelta(seconds=1)
# to deal with turnstiles that are counting in reverse, always take the abs
# value of the difference between current entries and previous entries
# for hourly entries that are very high, as an upper limit, use 3 people per
# turnstile per second
pps = 3
mta_df['HOURLY_ENTRIES'] = [np.abs(entries - mta_df['PREV_ENTRIES'][i]) \
if (np.abs(entries - mta_df['PREV_ENTRIES'][i]) < pps*mta_df['DELTA_DATE_TIME_SEC'][i]) \
else (pps*mta_df['DELTA_DATE_TIME_SEC'][i]) \
for i,entries in enumerate(mta_df['ENTRIES'])]
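# worked example of the cap above (illustrative numbers): a regular audit interval
# is roughly 4 hours = 14400 s, so at pps = 3 the ceiling is 3 * 14400 = 43200
# entries; larger jumps are treated as counter glitches and clipped to that ceiling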
# group unique stations (some stations have the same name, but are on different
# lines)
# sum the hourly entries to get the total number of entries over the time period
# sort by net entries
station_df = mta_df.groupby(['STATION','LINENAME'], as_index=False).agg({'LAT':'first', 'LON':'first', 'SYSTEM':'first', 'HOURLY_ENTRIES':'sum'}).sort_values('HOURLY_ENTRIES', ascending=False)
# rename the 'HOURLY_ENTRIES' column to 'NET_ENTRIES'
station_df.rename(columns={'HOURLY_ENTRIES': 'NET_ENTRIES'}, inplace=True)
# check which stations are missing lat/lon coordinates
check_missing_coord(station_df)
# drop the few stations that are missing lat/lon coordinates, ONLY IF they
# are also PATH stations
station_df = station_df[~((np.isnan(station_df['LAT']) | np.isnan(station_df['LON'])) & (station_df['SYSTEM'] == 'PATH'))]
# make sure there are no remaining stations that are missing lat/lon coordinates
check_missing_coord(station_df)
# reset index so that index corresponds to ranked net entries
station_df.reset_index(drop=True, inplace=True)
# create 'CROWD_INDEX'
min_entries = np.min(station_df['NET_ENTRIES'])
max_entries = | np.max(station_df['NET_ENTRIES']) | numpy.max |
#!/usr/bin/env python
from __future__ import print_function
import math
import numpy
import matplotlib
matplotlib.use("PDF")
fig_size = [8.3,11.7] # din A4
params = {'backend': 'pdf',
'axes.labelsize': 10,
'text.fontsize': 10,
'legend.fontsize': 10,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'text.usetex': True,
'figure.figsize': fig_size}
matplotlib.rcParams.update(params)
matplotlib.rc('font',**{'family':'serif','serif':['Computer Modern']})
import pylab
import scipy
import scipy.interpolate
import scipy.integrate
nm=1.
m=1.
wlens=numpy.array(
[290.*nm, 310.*nm, 330.*nm, 350.*nm, 370.*nm,
390.*nm, 410.*nm, 430.*nm, 450.*nm, 470.*nm,
490.*nm, 510.*nm, 530.*nm, 550.*nm, 570.*nm,
590.*nm, 610.*nm]
)
scatLength=[16.67194612*m, 20.24988356*m, 23.828246*m, 27.60753133*m, 31.54474622*m,
35.6150723*m, 39.79782704*m, 44.07227854*m, 48.42615012*m, 52.84574328*m,
57.31644409*m, 61.83527084*m, 66.38783775*m, 70.97232079*m, 75.58007709*m,
80.20532563*m, 84.84642797*m]
refIndex=[1.374123775, 1.368496907, 1.364102384, 1.360596772, 1.357746292,
1.355388160, 1.353406686, 1.351718123, 1.350260806, 1.348988618,
1.347866574, 1.346867782, 1.345971300, 1.345160644, 1.344422686,
1.343746868, 1.343124618]
# very old model, do not use!
absLength=[4.750413286*m, 7.004812306*m, 9.259259259*m, 14.92537313*m, 20.00000000*m,
26.31578947*m, 34.48275862*m, 43.47826087*m, 50.00000000*m, 62.50000000*m,
58.82352941*m, 50.00000000*m, 29.41176471*m, 17.85714286*m, 16.12903226*m,
8.849557522*m, 4.504504505*m]
# taken from Geasim (used by km3 by default)
# original comment says:
# "mix from Antares and Smith-Baker (smallest value for each bin)"
absCoeffGeasim=[0.2890,0.2440,0.1570,0.1080,0.0799,0.0708
,0.0638,0.0558,0.0507,0.0477,0.0357,0.0257,0.0196
,0.0182,0.0182,0.0191,0.0200,0.0218,0.0237,0.0255
,0.0291,0.0325,0.0363,0.0415,0.0473,0.0528,0.0629
,0.0710,0.0792,0.0946,0.1090,0.1390,0.215]
wlenGeasim= [610., 600., 590., 580., 570., 560.,
550., 540., 530., 520., 510., 500., 490.,
480., 470., 460., 450., 440., 430., 420.,
410., 400., 390., 380., 370., 360., 350.,
340., 330., 320., 310., 300., 290.]
interpolatedAbsorptionCoeffsGeasim = scipy.interpolate.interp1d(wlenGeasim[::-1], absCoeffGeasim[::-1], bounds_error=False)
AbsorptionCoeffsSmithBaker = numpy.loadtxt("ExistingData/AbsorptionCoefficients_SmithBaker.txt", unpack=True)
interpolatedAbsorptionCoeffsSmithBaker = scipy.interpolate.interp1d(AbsorptionCoeffsSmithBaker[0], AbsorptionCoeffsSmithBaker[1])
AbsorptionCoeffsPopeFry = numpy.loadtxt("ExistingData/AbsorptionCoefficients_PopeFry.txt", unpack=True)
interpolatedAbsorptionCoeffsPopeFry = scipy.interpolate.interp1d(AbsorptionCoeffsPopeFry[0], AbsorptionCoeffsPopeFry[1],bounds_error=False)
AbsorptionAntaresMeasurement = numpy.loadtxt("ExistingData/ANTARES_measured_absorption.txt", unpack=True)
AbsorptionAntaresMeasurement_Test3_Saclay = | numpy.loadtxt("ExistingData/ANTARES_measured_absorption_Test3_Saclay.txt", unpack=True) | numpy.loadtxt |
"""
Test Surrogates Overview
========================
"""
# Author: <NAME> <<EMAIL>>
# License: new BSD
from PIL import Image
import numpy as np
import scripts.surrogates_overview as exo
import scripts.image_classifier as imgclf
import sklearn.datasets
import sklearn.linear_model
SAMPLES = 10
BATCH = 50
SAMPLE_IRIS = False
IRIS_SAMPLES = 50000
def test_bilmey_image():
"""Tests surrogate image bLIMEy."""
# Load the image
doggo_img = Image.open('surrogates_overview/img/doggo.jpg')
doggo_array = np.array(doggo_img)
# Load the classifier
clf = imgclf.ImageClassifier()
explain_classes = [('tennis ball', 852),
('golden retriever', 207),
('Labrador retriever', 208)]
# Configure widgets to select occlusion colour, segmentation granularity
# and explained class
colour_selection = {
i: i for i in ['mean', 'black', 'white', 'randomise-patch', 'green']
}
granularity_selection = {'low': 13, 'medium': 30, 'high': 50}
# Generate explanations
blimey_image_collection = {}
for gran_name, gran_number in granularity_selection.items():
blimey_image_collection[gran_name] = {}
for col_name in colour_selection:
blimey_image_collection[gran_name][col_name] = \
exo.build_image_blimey(
doggo_array,
clf.predict_proba,
explain_classes,
explanation_size=5,
segments_number=gran_number,
occlusion_colour=col_name,
samples_number=SAMPLES,
batch_size=BATCH,
random_seed=42)
exp = []
for gran_ in blimey_image_collection:
for col_ in blimey_image_collection[gran_]:
exp.append(blimey_image_collection[gran_][col_]['surrogates'])
assert len(exp) == len(EXP_IMG)
for e, E in zip(exp, EXP_IMG):
assert sorted(list(e.keys())) == sorted(list(E.keys()))
for key in e.keys():
assert e[key]['name'] == E[key]['name']
assert len(e[key]['explanation']) == len(E[key]['explanation'])
for e_, E_ in zip(e[key]['explanation'], E[key]['explanation']):
assert e_[0] == E_[0]
assert np.allclose(e_[1], E_[1], atol=.001, equal_nan=True)
def test_bilmey_tabular():
"""Tests surrogate tabular bLIMEy."""
# Load the iris data set
iris = sklearn.datasets.load_iris()
iris_X = iris.data # [:, :2] # take the first two features only
iris_y = iris.target
iris_labels = iris.target_names
iris_feature_names = iris.feature_names
label2class = {lab: i for i, lab in enumerate(iris_labels)}
# Fit the classifier
logreg = sklearn.linear_model.LogisticRegression(C=1e5)
logreg.fit(iris_X, iris_y)
# instances to explain (one per class)
_dtype = iris_X.dtype
explained_instances = {
'setosa': np.array([5, 3.5, 1.5, 0.25]).astype(_dtype),
'versicolor': np.array([5.5, 2.75, 4.5, 1.25]).astype(_dtype),
'virginica': np.array([7, 3, 5.5, 2.25]).astype(_dtype)
}
petal_length_idx = iris_feature_names.index('petal length (cm)')
petal_length_bins = [1, 2, 3, 4, 5, 6, 7]
petal_width_idx = iris_feature_names.index('petal width (cm)')
petal_width_bins = [0, .5, 1, 1.5, 2, 2.5]
discs_ = []
for i, ix in enumerate(petal_length_bins): # X-axis
for iix in petal_length_bins[i + 1:]:
for j, jy in enumerate(petal_width_bins): # Y-axis
for jjy in petal_width_bins[j + 1:]:
discs_.append({
petal_length_idx: [ix, iix],
petal_width_idx: [jy, jjy]
})
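# each entry of discs_ maps a feature index to a pair of bin edges, e.g.
# {petal_length_idx: [1, 2], petal_width_idx: [0, 0.5]} (first pair from each
# axis); all edge pairs from both bin lists are enumerated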
for inst_i in explained_instances:
for cls_i in iris_labels:
for disc_i, disc in enumerate(discs_):
inst = explained_instances[inst_i]
cls = label2class[cls_i]
exp = exo.build_tabular_blimey(
inst, cls, iris_X, iris_y, logreg.predict_proba, disc,
IRIS_SAMPLES, SAMPLE_IRIS, 42)
key = '{}&{}&{}'.format(inst_i, cls, disc_i)
exp_ = EXP_TAB[key]
assert exp['explanation'].shape[0] == exp_.shape[0]
assert np.allclose(
exp['explanation'], exp_, atol=.001, equal_nan=True)
EXP_IMG = [
{207: {'explanation': [(13, -0.24406872165780585),
(11, -0.20456180387430317),
(9, -0.1866779131424261),
(4, 0.15001224157793785),
(3, 0.11589480417160983)],
'name': 'golden retriever'},
208: {'explanation': [(13, -0.08395966359346249),
(0, -0.0644986107387837),
(9, 0.05845584633658977),
(1, 0.04369763085720947),
(11, -0.035958188394941866)],
'name': '<NAME>'},
852: {'explanation': [(13, 0.3463529698715463),
(11, 0.2678050131923326),
(4, -0.10639863421417416),
(6, 0.08345792378117327),
(9, 0.07366945242386444)],
'name': '<NAME>'}},
{207: {'explanation': [(13, -0.0624167912596456),
(7, 0.06083359545295548),
(3, 0.0495953943686462),
(11, -0.04819787147412231),
(2, -0.03858823761391199)],
'name': '<NAME>'},
208: {'explanation': [(13, -0.08408428146916162),
(7, 0.07704235920590158),
(3, 0.06646468388122273),
(11, -0.0638326572126609),
(2, -0.052621478002380796)],
'name': '<NAME>'},
852: {'explanation': [(11, 0.35248212611685886),
(13, 0.2516925608037859),
(2, 0.13682853028454384),
(9, 0.12930134856644754),
(6, 0.1257747954095489)],
'name': '<NAME>'}},
{207: {'explanation': [(3, 0.21351937934930917),
(10, 0.16933456312772083),
(11, -0.13447244552856766),
(8, 0.11058919217055371),
(2, -0.06269239798368743)],
'name': '<NAME>'},
208: {'explanation': [(8, 0.05995551486884414),
(9, -0.05375302972380482),
(11, -0.051997353324246445),
(6, 0.04213181405953071),
(2, -0.039169895361928275)],
'name': '<NAME>'},
852: {'explanation': [(7, 0.31382219776986503),
(11, 0.24126214884275987),
(13, 0.21075924370226598),
(2, 0.11937652039885377),
(8, -0.11911265319329697)],
'name': '<NAME>'}},
{207: {'explanation': [(3, 0.39254403293049134),
(9, 0.19357165018747347),
(6, 0.16592079671652987),
(0, 0.14042059731407297),
(1, 0.09793027079765507)],
'name': '<NAME>'},
208: {'explanation': [(9, -0.19351859273276703),
(1, -0.15262967987262344),
(3, 0.12205127112235375),
(2, 0.11352141032313934),
(6, -0.11164209893429898)],
'name': '<NAME>'},
852: {'explanation': [(7, 0.17213007100844877),
(0, -0.1583030948868859),
(3, -0.13748574615069775),
(5, 0.13273283867075436),
(11, 0.12309551170070354)],
'name': '<NAME>'}},
{207: {'explanation': [(3, 0.4073533182995105),
(10, 0.20711667988142463),
(8, 0.15360813290032324),
(6, 0.1405424759832785),
(1, 0.1332920685413575)],
'name': '<NAME>'},
208: {'explanation': [(9, -0.14747910525112617),
(1, -0.13977061235228924),
(2, 0.10526833898161611),
(6, -0.10416022118399552),
(3, 0.09555992655161764)],
'name': '<NAME>'},
852: {'explanation': [(11, 0.2232260929107954),
(7, 0.21638443149433054),
(5, 0.21100464215582274),
(13, 0.145614853795006),
(1, -0.11416523431311262)],
'name': '<NAME>'}},
{207: {'explanation': [(1, 0.14700178977744183),
(0, 0.10346667279328238),
(2, 0.10346667279328238),
(7, 0.10346667279328238),
(8, 0.10162900633690726)],
'name': '<NAME>'},
208: {'explanation': [(10, -0.10845134816658476),
(8, -0.1026920429226184),
(6, -0.10238154733842847),
(18, 0.10094164937411244),
(16, 0.08646888450232793)],
'name': '<NAME>'},
852: {'explanation': [(18, -0.20542297091894474),
(13, 0.2012751176130666),
(8, -0.19194747162742365),
(20, 0.14686930696710473),
(15, 0.11796990086271067)],
'name': '<NAME>'}},
{207: {'explanation': [(13, 0.12446259821701779),
(17, 0.11859084421095789),
(15, 0.09690553833007137),
(12, -0.08869743701731962),
(4, 0.08124900427893789)],
'name': '<NAME>'},
208: {'explanation': [(10, -0.09478194981909983),
(20, -0.09173392507039077),
(9, 0.08768898801254493),
(17, -0.07553994244536394),
(4, 0.07422905503397653)],
'name': '<NAME>'},
852: {'explanation': [(21, 0.1327882942965061),
(1, 0.1238236573086363),
(18, -0.10911712271717902),
(19, 0.09707191051320978),
(6, 0.08593672504338913)],
'name': '<NAME>'}},
{207: {'explanation': [(6, 0.14931728779865114),
(14, 0.14092073957103526),
(1, 0.11071480021464616),
(4, 0.10655287976934531),
(8, 0.08705404649152573)],
'name': '<NAME>'},
208: {'explanation': [(8, -0.12242580400886727),
(9, 0.12142729544158742),
(14, -0.1148252787068248),
(16, -0.09562322208795092),
(4, 0.09350160975513132)],
'name': '<NAME>'},
852: {'explanation': [(6, 0.04227675072263027),
(9, -0.03107924340879173),
(14, 0.028007115650713045),
(13, 0.02771190348545554),
(19, 0.02640441416071482)],
'name': '<NAME>'}},
{207: {'explanation': [(19, 0.14313680656283245),
(18, 0.12866508562342843),
(8, 0.11809779264185447),
(0, 0.11286255403442104),
(2, 0.11286255403442104)],
'name': '<NAME>'},
208: {'explanation': [(9, 0.2397917428082761),
(14, -0.19435572812170654),
(6, -0.1760894833446507),
(18, -0.12243333818399058),
(15, 0.10986343675377105)],
'name': '<NAME>'},
852: {'explanation': [(14, 0.15378038774613365),
(9, -0.14245940635481966),
(6, 0.10213601012183973),
(20, 0.1009180838986786),
(3, 0.09780065767815548)],
'name': '<NAME>'}},
{207: {'explanation': [(15, 0.06525850448807077),
(9, 0.06286791243851698),
(19, 0.055189970374185854),
(8, 0.05499197604401475),
(13, 0.04748220842936177)],
'name': '<NAME>'},
208: {'explanation': [(6, -0.31549091899770765),
(5, 0.1862302670824446),
(8, -0.17381478451341995),
(10, -0.17353516098662508),
(14, -0.13591542421754205)],
'name': '<NAME>'},
852: {'explanation': [(14, 0.2163853942943355),
(6, 0.17565046338282214),
(1, 0.12446193028474549),
(9, -0.11365789839746396),
(10, 0.09239073691962967)],
'name': '<NAME>'}},
{207: {'explanation': [(19, 0.1141207265647932),
(36, -0.08861425922625768),
(30, 0.07219209872026074),
(9, -0.07150939547859836),
(38, -0.06988288637544438)],
'name': '<NAME>'},
208: {'explanation': [(29, 0.10531073909547647),
(13, 0.08279642208039652),
(34, -0.0817952443980797),
(33, -0.08086848205765082),
(12, 0.08086848205765082)],
'name': '<NAME>'},
852: {'explanation': [(13, -0.1330452414595897),
(4, 0.09942366413042845),
(12, -0.09881995683190645),
(33, 0.09881995683190645),
(19, -0.09596925317560831)],
'name': '<NAME>'}},
{207: {'explanation': [(37, 0.08193926967758253),
(35, 0.06804043021426347),
(15, 0.06396269230810163),
(11, 0.062255657227065296),
(8, 0.05529200233091672)],
'name': '<NAME>'},
208: {'explanation': [(19, 0.05711957286614678),
(27, -0.050230108135410824),
(16, -0.04743034616549999),
(5, -0.046717346734255705),
(9, -0.04419100026638039)],
'name': '<NAME>'},
852: {'explanation': [(3, -0.08390967998497496),
(30, -0.07037680222442452),
(22, 0.07029819368543713),
(8, -0.06861396187180349),
(37, -0.06662511956402824)],
'name': '<NAME>'}},
{207: {'explanation': [(19, 0.048418845359024805),
(9, -0.0423869575883795),
(30, 0.04012650790044438),
(36, -0.03787242980067195),
(10, 0.036557999380695635)],
'name': '<NAME>'},
208: {'explanation': [(10, 0.12120686823129677),
(17, 0.10196564232230493),
(7, 0.09495133975425854),
(25, -0.0759657891182803),
(2, -0.07035244568286837)],
'name': '<NAME>'},
852: {'explanation': [(3, -0.0770578003457272),
(28, 0.0769372258280398),
(6, -0.06044725989272927),
(22, 0.05550155775286349),
(31, -0.05399028046597057)],
'name': '<NAME>'}},
{207: {'explanation': [(14, 0.05371383110181226),
(0, -0.04442539316084218),
(18, 0.042589475382826494),
(19, 0.04227647855354252),
(17, 0.041685661662754295)],
'name': '<NAME>'},
208: {'explanation': [(29, 0.14419601354489464),
(17, 0.11785174500536676),
(36, 0.1000501679652906),
(10, 0.09679790134851017),
(35, 0.08710376081189208)],
'name': '<NAME>'},
852: {'explanation': [(8, -0.02486237985832769),
(3, -0.022559886154747102),
(11, -0.021878686669239856),
(36, 0.021847953817988534),
(19, -0.018317598300716522)],
'name': '<NAME>'}},
{207: {'explanation': [(37, 0.08098729255605368),
(35, 0.06639102704982619),
(15, 0.06033721190370432),
(34, 0.05826267856117829),
(28, 0.05549505160798173)],
'name': '<NAME>'},
208: {'explanation': [(17, 0.13839012042250542),
(10, 0.11312187488346881),
(7, 0.10729071207480922),
(25, -0.09529127965797404),
(11, -0.09279834572979286)],
'name': '<NAME>'},
852: {'explanation': [(3, -0.028385651836694076),
(22, 0.023364702783498722),
(8, -0.023097812578270233),
(30, -0.022931236620034406),
(37, -0.022040170736525342)],
'name': '<NAME>'}}
]
EXP_TAB = {
'setosa&0&0': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&1': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&2': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&3': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&4': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&5': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&6': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&7': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&8': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&9': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&10': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&11': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&12': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&13': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&14': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&15': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&16': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&17': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&18': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&19': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&20': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&21': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&22': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&23': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&24': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&25': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&26': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&27': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&28': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&29': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&30': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&31': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&32': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&33': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&34': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&35': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&36': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&37': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&38': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&39': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&40': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&41': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&42': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&43': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&44': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&45': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&46': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&47': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&48': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&49': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&50': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&51': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&52': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&53': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&54': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&55': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&56': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&57': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&58': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&59': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&60': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&61': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&62': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&63': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&64': np.array([0.3094460464703627, 0.11400643817329122]),
'setosa&0&65': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&66': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&67': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&68': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&69': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&70': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&71': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&72': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&73': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&74': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&75': np.array([0.0, 0.95124502153736]),
'setosa&0&76': np.array([0.0, 0.9708703761803881]),
'setosa&0&77': np.array([0.0, 0.5659706098422994]),
'setosa&0&78': np.array([0.0, 0.3962828716108186]),
'setosa&0&79': np.array([0.0, 0.2538069363248767]),
'setosa&0&80': np.array([0.0, 0.95124502153736]),
'setosa&0&81': np.array([0.0, 0.95124502153736]),
'setosa&0&82': np.array([0.0, 0.95124502153736]),
'setosa&0&83': np.array([0.0, 0.95124502153736]),
'setosa&0&84': np.array([0.0, 0.9708703761803881]),
'setosa&0&85': np.array([0.0, 0.9708703761803881]),
'setosa&0&86': np.array([0.0, 0.9708703761803881]),
'setosa&0&87': np.array([0.0, 0.5659706098422994]),
'setosa&0&88': np.array([0.0, 0.5659706098422994]),
'setosa&0&89': np.array([0.0, 0.3962828716108186]),
'setosa&0&90': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&91': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&92': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&93': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&94': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&95': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&96': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&97': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&98': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&99': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&100': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&101': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&102': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&103': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&104': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&105': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&106': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&107': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&108': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&109': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&110': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&111': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&112': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&113': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&114': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&115': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&116': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&117': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&118': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&119': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&120': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&121': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&122': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&123': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&124': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&125': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&126': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&127': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&128': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&129': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&130': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&131': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&132': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&133': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&134': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&135': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&136': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&137': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&138': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&139': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&140': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&141': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&142': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&143': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&144': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&145': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&146': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&147': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&148': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&149': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&150': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&151': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&152': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&153': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&154': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&155': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&156': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&157': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&158': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&159': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&160': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&161': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&162': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&163': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&164': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&165': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&166': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&167': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&168': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&169': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&170': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&171': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&172': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&173': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&174': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&175': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&176': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&177': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&178': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&179': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&180': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&181': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&182': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&183': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&184': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&185': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&186': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&187': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&188': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&189': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&190': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&191': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&192': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&193': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&194': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&195': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&196': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&197': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&198': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&199': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&200': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&201': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&202': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&203': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&204': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&205': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&206': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&207': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&208': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&209': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&210': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&211': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&212': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&213': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&214': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&215': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&216': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&217': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&218': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&219': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&220': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&221': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&222': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&223': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&224': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&225': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&226': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&227': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&228': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&229': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&230': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&231': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&232': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&233': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&234': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&235': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&236': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&237': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&238': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&239': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&240': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&241': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&242': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&243': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&244': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&245': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&246': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&247': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&248': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&249': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&250': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&251': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&252': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&253': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&254': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&255': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&256': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&257': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&258': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&259': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&260': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&261': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&262': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&263': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&264': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&265': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&266': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&267': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&268': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&269': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&270': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&271': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&272': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&273': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&274': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&275': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&276': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&277': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&278': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&279': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&280': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&281': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&282': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&283': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&284': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&285': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&286': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&287': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&288': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&289': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&290': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&291': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&292': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&293': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&294': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&295': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&296': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&297': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&298': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&299': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&300': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&301': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&302': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&303': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&304': np.array([0.3094460464703627, 0.11400643817329122]),
'setosa&0&305': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&306': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&307': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&308': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&309': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&310': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&311': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&312': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&313': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&314': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&1&0': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&1': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&2': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&3': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&4': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&5': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&6': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&7': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&8': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&9': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&10': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&11': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&12': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&13': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&14': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&15': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&16': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&17': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&18': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&19': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&20': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&21': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&22': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&23': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&24': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&25': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&26': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&27': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&28': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&29': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&30': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&31': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&32': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&33': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&34': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&35': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&36': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&37': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&38': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&39': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&40': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&41': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&42': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&43': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&44': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&45': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&46': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&47': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&48': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&49': np.array([0.4079256832347186, 0.038455640985860955]),
'setosa&1&50': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&51': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&52': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&53': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&54': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&55': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&56': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&57': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&58': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&59': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&60': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&61': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&62': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&63': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&1&64': np.array([0.3093950298647913, 0.1140298206733954]),
'setosa&1&65': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&66': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&67': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&68': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&69': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&70': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&71': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&72': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&73': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&74': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&1&75': np.array([0.0, -0.4756207622944677]),
'setosa&1&76': np.array([0.0, -0.4854334805210761]),
'setosa&1&77': np.array([0.0, 0.16885577975809635]),
'setosa&1&78': np.array([0.0, 0.395805885538554]),
'setosa&1&79': np.array([0.0, 0.2538072707138344]),
'setosa&1&80': np.array([0.0, -0.4756207622944677]),
'setosa&1&81': np.array([0.0, -0.4756207622944677]),
'setosa&1&82': np.array([0.0, -0.4756207622944677]),
'setosa&1&83': np.array([0.0, -0.4756207622944677]),
'setosa&1&84': np.array([0.0, -0.4854334805210761]),
'setosa&1&85': np.array([0.0, -0.4854334805210761]),
'setosa&1&86': np.array([0.0, -0.4854334805210761]),
'setosa&1&87': np.array([0.0, 0.16885577975809635]),
'setosa&1&88': np.array([0.0, 0.16885577975809635]),
'setosa&1&89': np.array([0.0, 0.395805885538554]),
'setosa&1&90': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&91': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&92': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&93': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&94': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&95': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&96': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&97': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&98': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&99': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&100': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&101': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&102': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&103': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&104': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&105': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&106': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&107': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&108': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&109': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&110': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&111': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&112': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&113': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&114': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&115': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&116': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&117': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&118': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&119': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&120': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&121': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&122': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&123': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&124': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&125': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&126': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&127': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&128': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&129': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&130': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&131': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&132': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&133': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&134': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&135': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&136': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&137': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&138': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&139': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&140': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&141': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&142': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&143': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&144': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&145': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&146': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&147': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&148': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&149': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&150': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&151': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&152': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&153': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&154': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&155': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&156': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&157': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&158': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&159': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&160': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&161': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&162': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&163': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&164': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&165': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&166': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&167': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&168': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&169': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&170': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&171': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&172': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&173': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&174': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&175': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&176': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&177': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&178': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&179': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&180': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&181': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&182': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&183': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&184': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&185': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&186': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&187': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&188': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&189': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&190': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&191': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&192': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&193': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&194': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&195': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&196': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&197': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&198': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&199': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&200': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&201': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&202': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&203': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&204': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&205': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&206': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&207': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&208': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&209': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&210': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&211': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&212': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&213': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&214': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&215': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&216': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&217': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&218': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&219': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&220': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&221': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&222': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&223': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&224': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&225': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&226': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&227': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&228': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&229': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&230': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&231': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&232': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&233': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&234': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&235': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&236': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&237': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&238': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&239': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&240': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&241': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&242': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&243': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&244': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&245': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&246': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&247': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&248': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&249': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&250': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&251': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&252': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&253': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&254': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&255': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&256': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&257': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&258': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&259': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&260': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&261': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&262': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&263': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&264': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&265': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&266': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&267': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&268': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&269': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&270': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&271': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&272': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&273': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&274': np.array([0.4079256832347186, 0.038455640985860955]),
'setosa&1&275': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&276': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&277': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&278': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&279': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&280': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&281': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&282': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&283': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&284': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&285': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&286': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&287': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&288': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&289': np.array([0.4079256832347186, 0.038455640985860955]),
'setosa&1&290': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&291': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&292': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&293': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&294': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&295': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&296': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&297': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&298': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&299': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&300': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&301': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&302': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&303': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&1&304': np.array([0.3093950298647913, 0.1140298206733954]),
'setosa&1&305': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&306': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&307': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&308': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&309': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&310': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&311': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&312': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&313': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&314': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&2&0': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&1': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&2': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&3': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&4': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&5': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&6': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&7': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&8': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&9': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&10': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&11': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&12': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&13': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&14': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&15': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&16': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&17': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&18': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&19': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&20': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&21': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&22': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&23': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&24': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&25': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&26': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&27': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&28': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&29': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&30': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&31': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&32': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&33': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&34': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&35': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&36': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&37': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&38': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&39': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&40': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&41': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&42': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&43': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&44': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&45': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&46': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&47': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&48': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&49': np.array([-0.8735738195653328, -0.046438180466149094]),
'setosa&2&50': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&51': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&52': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&53': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&54': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&55': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&56': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&57': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&58': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&59': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&60': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&61': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&62': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&63': np.array([-0.2741128763380603, -0.7260889090887469]),
'setosa&2&64': np.array([-0.6188410763351541, -0.22803625884668638]),
'setosa&2&65': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&66': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&67': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&68': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&69': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&70': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&71': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&72': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&73': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&74': np.array([-0.2741128763380603, -0.7260889090887469]),
'setosa&2&75': np.array([0.0, -0.47562425924289314]),
'setosa&2&76': np.array([0.0, -0.48543689565931186]),
'setosa&2&77': np.array([0.0, -0.7348263896003956]),
'setosa&2&78': np.array([0.0, -0.7920887571493729]),
'setosa&2&79': np.array([0.0, -0.507614207038711]),
'setosa&2&80': np.array([0.0, -0.47562425924289314]),
'setosa&2&81': np.array([0.0, -0.47562425924289314]),
'setosa&2&82': np.array([0.0, -0.47562425924289314]),
'setosa&2&83': np.array([0.0, -0.47562425924289314]),
'setosa&2&84': np.array([0.0, -0.48543689565931186]),
'setosa&2&85': np.array([0.0, -0.48543689565931186]),
'setosa&2&86': np.array([0.0, -0.48543689565931186]),
'setosa&2&87': np.array([0.0, -0.7348263896003956]),
'setosa&2&88': np.array([0.0, -0.7348263896003956]),
'setosa&2&89': np.array([0.0, -0.7920887571493729]),
'setosa&2&90': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&91': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&92': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&93': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&94': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&95': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&96': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&97': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&98': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&99': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&100': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&101': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&102': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&103': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&104': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&105': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&106': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&107': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&108': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&109': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&110': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&111': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&112': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&113': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&114': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&115': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&116': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&117': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&118': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&119': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&120': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&121': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&122': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&123': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&124': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&125': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&126': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&127': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&128': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&129': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&130': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&131': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&132': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&133': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&134': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&135': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&136': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&137': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&138': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&139': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&140': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&141': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&142': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&143': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&144': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&145': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&146': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&147': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&148': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&149': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&150': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&151': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&152': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&153': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&154': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&155': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&156': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&157': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&158': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&159': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&160': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&161': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&162': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&163': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&164': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&165': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&166': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&167': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&168': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&169': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&170': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&171': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&172': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&173': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&174': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&175': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&176': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&177': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&178': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&179': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&180': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&181': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&182': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&183': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&184': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&185': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&186': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&187': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&188': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&189': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&190': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&191': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&192': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&193': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&194': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&195': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&196': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&197': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&198': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&199': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&200': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&201': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&202': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&203': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&204': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&205': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&206': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&207': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&208': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&209': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&210': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&211': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&212': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&213': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&214': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&215': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&216': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&217': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&218': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&219': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&220': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&221': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&222': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&223': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&224': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&225': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&226': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&227': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&228': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&229': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&230': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&231': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&232': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&233': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&234': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&235': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&236': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&237': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&238': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&239': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&240': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&241': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&242': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&243': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&244': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&245': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&246': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&247': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&248': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&249': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&250': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&251': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&252': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&253': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&254': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&255': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&256': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&257': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&258': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&259': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&260': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&261': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&262': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&263': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&264': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&265': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&266': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&267': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&268': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&269': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&270': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&271': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&272': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&273': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&274': np.array([-0.8735738195653328, -0.046438180466149094]),
'setosa&2&275': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&276': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&277': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&278': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&279': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&280': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&281': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&282': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&283': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&284': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&285': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&286': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&287': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&288': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&289': np.array([-0.8735738195653328, -0.046438180466149094]),
'setosa&2&290': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&291': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&292': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&293': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&294': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&295': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&296': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&297': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&298': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&299': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&300': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&301': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&302': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&303': np.array([-0.2741128763380603, -0.7260889090887469]),
'setosa&2&304': np.array([-0.6188410763351541, -0.22803625884668638]),
'setosa&2&305': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&306': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&307': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&308': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&309': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&310': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&311': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&312': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&313': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&314': np.array([-0.2741128763380603, -0.7260889090887469]),
'versicolor&0&0': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&1': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&2': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&3': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&4': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&5': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&6': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&7': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&8': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&9': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&10': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&11': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&12': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&13': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&14': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&15': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&16': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&17': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&18': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&19': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&20': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&21': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&22': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&23': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&24': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&25': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&26': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&27': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&28': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&29': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&30': np.array([-0.19685199412911655, -0.7845879230594393]),
'versicolor&0&31': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&32': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&33': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&34': np.array([-0.7974072911132788, 0.006894018772033604]),
'versicolor&0&35': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&36': np.array([-0.7779663027946229, -0.2981599980028888]),
'versicolor&0&37': np.array([-0.6669876551417979, -0.2911996622134135]),
'versicolor&0&38': np.array([-0.3355030348883163, -0.6305271339971502]),
'versicolor&0&39': np.array([-0.7658431164447598, -0.3248317507526541]),
'versicolor&0&40': np.array([-0.6459073168288453, -0.31573292128613833]),
'versicolor&0&41': np.array([-0.2519677855687844, -0.7134447168661863]),
'versicolor&0&42': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&43': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&44': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&45': np.array([0.05031696218434577, -0.929227611211748]),
'versicolor&0&46': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&47': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&48': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&49': np.array([0.4656481363306145, 0.007982539480288167]),
'versicolor&0&50': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&51': np.array([0.6614632074748169, -0.6030419328583525]),
'versicolor&0&52': np.array([0.5519595359123358, -0.6434192906054143]),
'versicolor&0&53': np.array([0.14241819268815753, -0.8424615476000691]),
'versicolor&0&54': np.array([0.667423576348749, -0.6594086777766442]),
'versicolor&0&55': np.array([0.5429872243487625, -0.6697888833280774]),
'versicolor&0&56': np.array([0.1140907502997574, -0.8737800276630269]),
'versicolor&0&57': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&58': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&59': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&60': np.array([0.029402442458921384, -0.9481684282717414]),
'versicolor&0&61': np.array([0.009887859354111524, -0.9698143912008228]),
'versicolor&0&62': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&63': np.array([0.13694026920485936, 0.36331091829858003]),
'versicolor&0&64': np.array([0.3094460464703627, 0.11400643817329122]),
'versicolor&0&65': np.array([0.009887859354111524, -0.9698143912008228]),
'versicolor&0&66': np.array([0.42809266524335826, -0.40375108595117376]),
'versicolor&0&67': np.array([0.45547700380103057, -0.6083463409799501]),
'versicolor&0&68': np.array([0.19002455311770447, -0.8848597943731074]),
'versicolor&0&69': np.array([0.436966114193701, -0.4638042290788281]),
'versicolor&0&70': np.array([0.45424510803217066, -0.6425314361631614]),
'versicolor&0&71': np.array([0.1746467870122951, -0.9073062742839755]),
'versicolor&0&72': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&73': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&74': np.array([0.13694026920485936, 0.36331091829858003]),
'versicolor&0&75': np.array([0.0, -0.95124502153736]),
'versicolor&0&76': np.array([0.0, -0.9708703761803881]),
'versicolor&0&77': np.array([0.0, 0.5659706098422994]),
'versicolor&0&78': np.array([0.0, 0.3962828716108186]),
'versicolor&0&79': np.array([0.0, 0.2538069363248767]),
'versicolor&0&80': np.array([0.0, -0.9708703761803881]),
'versicolor&0&81': np.array([0.0, -0.3631376646911367]),
'versicolor&0&82': np.array([0.0, -0.5804857652839247]),
'versicolor&0&83': np.array([0.0, -0.8943993997517804]),
'versicolor&0&84': np.array([0.0, -0.4231275527222919]),
'versicolor&0&85': np.array([0.0, -0.6164235822373675]),
'versicolor&0&86': np.array([0.0, -0.9166476163222441]),
'versicolor&0&87': np.array([0.0, 0.5659706098422994]),
'versicolor&0&88': np.array([0.0, 0.5659706098422994]),
'versicolor&0&89': np.array([0.0, 0.3962828716108186]),
'versicolor&0&90': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&91': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&92': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&93': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&94': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&95': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&96': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&97': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&98': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&99': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&100': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&101': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&102': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&103': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&104': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&105': np.array([-0.19685199412911655, -0.7845879230594393]),
'versicolor&0&106': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&107': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&108': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&109': np.array([-0.7974072911132788, 0.006894018772033604]),
'versicolor&0&110': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&111': np.array([-0.7779663027946229, -0.2981599980028888]),
'versicolor&0&112': np.array([-0.6669876551417979, -0.2911996622134135]),
'versicolor&0&113': np.array([-0.3355030348883163, -0.6305271339971502]),
'versicolor&0&114': np.array([-0.7658431164447598, -0.3248317507526541]),
'versicolor&0&115': np.array([-0.6459073168288453, -0.31573292128613833]),
'versicolor&0&116': np.array([-0.2519677855687844, -0.7134447168661863]),
'versicolor&0&117': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&118': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&119': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&120': np.array([-0.05855179950109871, -0.9211684729232403]),
'versicolor&0&121': np.array([-0.020067537725011863, -0.960349531159508]),
'versicolor&0&122': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&123': np.array([-0.6813845327458135, 0.6599725404733693]),
'versicolor&0&124': np.array([-0.5182062652425321, 0.3958533237517639]),
'versicolor&0&125': np.array([-0.020067537725011863, -0.960349531159508]),
'versicolor&0&126': np.array([-0.5107107533700952, 0.0075507123577884866]),
'versicolor&0&127': np.array([-0.1464063320531759, -0.4788055402156298]),
'versicolor&0&128': np.array([-0.061109248092233844, -0.8620287767000373]),
'versicolor&0&129': np.array([-0.4706137753079746, -0.057389625790424635]),
'versicolor&0&130': np.array([-0.06804620923037683, -0.5677904519730453]),
'versicolor&0&131': np.array([-0.020216773196675246, -0.9057119888626176]),
'versicolor&0&132': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&133': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&134': np.array([-0.6813845327458135, 0.6599725404733693]),
'versicolor&0&135': np.array([-0.19684482070614498, -0.7845939961595055]),
'versicolor&0&136': np.array([-0.07475231751447156, -0.9062785678426409]),
'versicolor&0&137': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&138': np.array([-0.7694171988675237, 0.276633135028249]),
'versicolor&0&139': np.array([-0.8063011502229427, 0.4134300066735808]),
'versicolor&0&140': np.array([-0.07475231751447156, -0.9062785678426409]),
'versicolor&0&141': np.array([-0.7985789197998611, 0.0026209054759345337]),
'versicolor&0&142': np.array([-0.7182275903095532, -0.11963032135457498]),
'versicolor&0&143': np.array([-0.2798927835773098, -0.6581136857450849]),
'versicolor&0&144': np.array([-0.7920119433269182, -0.0142751249964083]),
'versicolor&0&145': np.array([-0.6943081428778407, -0.14852813120265815]),
'versicolor&0&146': np.array([-0.16106555563262584, -0.777621649099753]),
'versicolor&0&147': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&148': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&149': np.array([-0.7694171988675237, 0.276633135028249]),
'versicolor&0&150': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&151': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&152': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&153': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&154': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&155': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&156': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&157': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&158': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&159': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&160': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&161': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&162': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&163': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&164': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&165': np.array([-0.19685199412911655, -0.7845879230594393]),
'versicolor&0&166': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&167': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&168': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&169': np.array([-0.7974072911132788, 0.006894018772033604]),
'versicolor&0&170': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&171': np.array([-0.7779663027946229, -0.2981599980028888]),
'versicolor&0&172': np.array([-0.6669876551417979, -0.2911996622134135]),
'versicolor&0&173': np.array([-0.3355030348883163, -0.6305271339971502]),
'versicolor&0&174': np.array([-0.7658431164447598, -0.3248317507526541]),
'versicolor&0&175': np.array([-0.6459073168288453, -0.31573292128613833]),
'versicolor&0&176': np.array([-0.2519677855687844, -0.7134447168661863]),
'versicolor&0&177': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&178': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&179': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&180': np.array([-0.05855179950109871, -0.9211684729232403]),
'versicolor&0&181': np.array([-0.020067537725011863, -0.960349531159508]),
'versicolor&0&182': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&183': np.array([-0.6813845327458135, 0.6599725404733693]),
'versicolor&0&184': np.array([-0.5182062652425321, 0.3958533237517639]),
'versicolor&0&185': np.array([-0.020067537725011863, -0.960349531159508]),
'versicolor&0&186': np.array([-0.5107107533700952, 0.0075507123577884866]),
'versicolor&0&187': np.array([-0.1464063320531759, -0.4788055402156298]),
'versicolor&0&188': np.array([-0.061109248092233844, -0.8620287767000373]),
'versicolor&0&189': np.array([-0.4706137753079746, -0.057389625790424635]),
'versicolor&0&190': np.array([-0.06804620923037683, -0.5677904519730453]),
'versicolor&0&191': np.array([-0.020216773196675246, -0.9057119888626176]),
'versicolor&0&192': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&193': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&194': np.array([-0.6813845327458135, 0.6599725404733693]),
'versicolor&0&195': np.array([-0.19684482070614498, -0.7845939961595055]),
'versicolor&0&196': np.array([-0.07475231751447156, -0.9062785678426409]),
'versicolor&0&197': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&198': np.array([-0.7694171988675237, 0.276633135028249]),
'versicolor&0&199': np.array([-0.8063011502229427, 0.4134300066735808]),
'versicolor&0&200': np.array([-0.07475231751447156, -0.9062785678426409]),
'versicolor&0&201': np.array([-0.7985789197998611, 0.0026209054759345337]),
'versicolor&0&202': np.array([-0.7182275903095532, -0.11963032135457498]),
'versicolor&0&203': np.array([-0.2798927835773098, -0.6581136857450849]),
'versicolor&0&204': np.array([-0.7920119433269182, -0.0142751249964083]),
'versicolor&0&205': np.array([-0.6943081428778407, -0.14852813120265815]),
'versicolor&0&206': np.array([-0.16106555563262584, -0.777621649099753]),
'versicolor&0&207': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&208': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&209': np.array([-0.7694171988675237, 0.276633135028249]),
'versicolor&0&210': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&211': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&212': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&213': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&214': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&215': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&216': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&217': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&218': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&219': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&220': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&221': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&222': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&223': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&224': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&225': np.array([-0.04777085826693217, -0.931704979630315]),
'versicolor&0&226': np.array([-0.016252316132452975, -0.9640854286687816]),
'versicolor&0&227': np.array([-0.44101924439572626, 0.5583264842761904]),
'versicolor&0&228': np.array([-0.5844994389588399, 0.5715208832363579]),
'versicolor&0&229': np.array([-0.46216647196120714, 0.35468591243823655]),
'versicolor&0&230': np.array([-0.016252316132452975, -0.9640854286687816]),
'versicolor&0&231': np.array([-0.3707180757031537, -0.1977196581472426]),
'versicolor&0&232': np.array([-0.1043459833293615, -0.5233314327065356]),
'versicolor&0&233': np.array([-0.049289647556763364, -0.8736084405111605]),
'versicolor&0&234': np.array([-0.34078174031874375, -0.25874482325965437]),
'versicolor&0&235': np.array([-0.050841051273783675, -0.5877587283589205]),
'versicolor&0&236': np.array([-0.0161720977425142, -0.9096817855236822]),
'versicolor&0&237': np.array([-0.44101924439572626, 0.5583264842761904]),
'versicolor&0&238': np.array([-0.44101924439572626, 0.5583264842761904]),
'versicolor&0&239': np.array([-0.5844994389588399, 0.5715208832363579]),
'versicolor&0&240': np.array([-0.11329659732608087, -0.8671819100849522]),
'versicolor&0&241': np.array([-0.040390637135858574, -0.9402832917474078]),
'versicolor&0&242': np.array([-0.5276460255602035, 0.28992233541586077]),
'versicolor&0&243': np.array([-0.6392402874163683, 0.24114611970435948]),
'versicolor&0&244': np.array([-0.6814868825686854, 0.35066801608083215]),
'versicolor&0&245': np.array([-0.040390637135858574, -0.9402832917474078]),
'versicolor&0&246': np.array([-0.6425009695928476, -0.24851992476830956]),
'versicolor&0&247': np.array([-0.5151243662384031, -0.3255567772442641]),
'versicolor&0&248': np.array([-0.16157511199607094, -0.7754323813403634]),
'versicolor&0&249': np.array([-0.6300442788906601, -0.28361140069713875]),
'versicolor&0&250': np.array([-0.4875864856121089, -0.3614122096616301]),
'versicolor&0&251': np.array([-0.08968204532514226, -0.8491191210330045]),
'versicolor&0&252': np.array([-0.5276460255602035, 0.28992233541586077]),
'versicolor&0&253': np.array([-0.5276460255602035, 0.28992233541586077]),
'versicolor&0&254': np.array([-0.6392402874163683, 0.24114611970435948]),
'versicolor&0&255': np.array([-0.19685199412911655, -0.7845879230594393]),
'versicolor&0&256': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&257': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&258': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&259': np.array([-0.7974072911132788, 0.006894018772033604]),
'versicolor&0&260': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&261': np.array([-0.7779663027946229, -0.2981599980028888]),
'versicolor&0&262': np.array([-0.6669876551417979, -0.2911996622134135]),
'versicolor&0&263': np.array([-0.3355030348883163, -0.6305271339971502]),
'versicolor&0&264': np.array([-0.7658431164447598, -0.3248317507526541]),
'versicolor&0&265': np.array([-0.6459073168288453, -0.31573292128613833]),
'versicolor&0&266': np.array([-0.2519677855687844, -0.7134447168661863]),
'versicolor&0&267': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&268': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&269': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&270': np.array([0.05031696218434577, -0.929227611211748]),
'versicolor&0&271': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&272': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&273': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&274': np.array([0.4656481363306145, 0.007982539480288167]),
'versicolor&0&275': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&276': np.array([0.6614632074748169, -0.6030419328583525]),
'versicolor&0&277': np.array([0.5519595359123358, -0.6434192906054143]),
'versicolor&0&278': np.array([0.14241819268815753, -0.8424615476000691]),
'versicolor&0&279': np.array([0.667423576348749, -0.6594086777766442]),
'versicolor&0&280': np.array([0.5429872243487625, -0.6697888833280774]),
'versicolor&0&281': np.array([0.1140907502997574, -0.8737800276630269]),
'versicolor&0&282': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&283': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&284': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&285': np.array([0.05031696218434577, -0.929227611211748]),
'versicolor&0&286': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&287': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&288': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&289': np.array([0.4656481363306145, 0.007982539480288167]),
'versicolor&0&290': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&291': np.array([0.6614632074748169, -0.6030419328583525]),
'versicolor&0&292': np.array([0.5519595359123358, -0.6434192906054143]),
'versicolor&0&293': np.array([0.14241819268815753, -0.8424615476000691]),
'versicolor&0&294': np.array([0.667423576348749, -0.6594086777766442]),
'versicolor&0&295': np.array([0.5429872243487625, -0.6697888833280774]),
'versicolor&0&296': np.array([0.1140907502997574, -0.8737800276630269]),
'versicolor&0&297': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&298': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&299': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&300': np.array([0.029402442458921384, -0.9481684282717414]),
'versicolor&0&301': np.array([0.009887859354111524, -0.9698143912008228]),
'versicolor&0&302': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&303': np.array([0.13694026920485936, 0.36331091829858003]),
'versicolor&0&304': np.array([0.3094460464703627, 0.11400643817329122]),
'versicolor&0&305': np.array([0.009887859354111524, -0.9698143912008228]),
'versicolor&0&306': np.array([0.42809266524335826, -0.40375108595117376]),
'versicolor&0&307': np.array([0.45547700380103057, -0.6083463409799501]),
'versicolor&0&308': np.array([0.19002455311770447, -0.8848597943731074]),
'versicolor&0&309': np.array([0.436966114193701, -0.4638042290788281]),
'versicolor&0&310': np.array([0.45424510803217066, -0.6425314361631614]),
'versicolor&0&311': np.array([0.1746467870122951, -0.9073062742839755]),
'versicolor&0&312': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&313': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&314': np.array([0.13694026920485936, 0.36331091829858003]),
'versicolor&1&0': np.array([0.37157553889555184, 0.1221600832023858]),
'versicolor&1&1': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&2': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&3': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&4': np.array([0.4964962439921071, 0.3798215458387346]),
'versicolor&1&5': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&6': np.array([0.2805345936193346, 0.6595182922149835]),
'versicolor&1&7': np.array([0.08302493125394889, 0.6186280682763334]),
'versicolor&1&8': np.array([0.22125635302655813, 0.2925832702358638]),
'versicolor&1&9': np.array([0.2365788606456636, 0.7120007179768731]),
'versicolor&1&10': np.array([0.022347126801293967, 0.6718013300441928]),
'versicolor&1&11': np.array([0.10063786451829529, 0.4085974066833644]),
'versicolor&1&12': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&13': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&14': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&15': np.array([0.37157553889555184, 0.1221600832023858]),
'versicolor&1&16': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&17': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&18': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&19': np.array([0.4964962439921071, 0.3798215458387346]),
'versicolor&1&20': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&21': np.array([0.2805345936193346, 0.6595182922149835]),
'versicolor&1&22': np.array([0.08302493125394889, 0.6186280682763334]),
'versicolor&1&23': np.array([0.22125635302655813, 0.2925832702358638]),
'versicolor&1&24': np.array([0.2365788606456636, 0.7120007179768731]),
'versicolor&1&25': np.array([0.022347126801293967, 0.6718013300441928]),
'versicolor&1&26': np.array([0.10063786451829529, 0.4085974066833644]),
'versicolor&1&27': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&28': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&29': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&30': np.array([-0.32199975656257646, 0.7482293552463756]),
'versicolor&1&31': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&32': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&33': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&34': np.array([0.2619265016777598, 0.33491141590339474]),
'versicolor&1&35': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&36': np.array([0.20183015430619713, 0.7445346002055082]),
'versicolor&1&37': np.array([-0.05987874887638573, 0.6927937290176818]),
'versicolor&1&38': np.array([-0.2562642052727569, 0.6920266972283227]),
'versicolor&1&39': np.array([0.1736438124560164, 0.7898174616442941]),
'versicolor&1&40': np.array([-0.10114089899940126, 0.7326610366533243]),
'versicolor&1&41': np.array([-0.34479806250338163, 0.7789143553916729]),
'versicolor&1&42': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&43': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&44': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&45': np.array([0.7749499208750119, 0.8147189440804429]),
'versicolor&1&46': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&47': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&48': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&49': np.array([0.4079256832347186, 0.038455640985860955]),
'versicolor&1&50': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&51': np.array([0.18555813792691386, 0.6940923833143309]),
'versicolor&1&52': np.array([0.32639262064172164, 0.6296083447134281]),
'versicolor&1&53': np.array([0.6964303997553315, 0.7444536452136676]),
'versicolor&1&54': np.array([0.18216358701833335, 0.747615101407194]),
'versicolor&1&55': np.array([0.33549445287370383, 0.6526039763053625]),
'versicolor&1&56': np.array([0.7213651642695392, 0.7718874443854203]),
'versicolor&1&57': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&58': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&59': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&60': np.array([0.4933316375690332, 0.5272416708629276]),
'versicolor&1&61': np.array([0.5041830043657418, 0.5392782673950876]),
'versicolor&1&62': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&63': np.array([0.13717260713320106, 0.3627779907901665]),
'versicolor&1&64': np.array([0.3093950298647913, 0.1140298206733954]),
'versicolor&1&65': np.array([0.5041830043657418, 0.5392782673950876]),
'versicolor&1&66': np.array([0.1413116283690917, 0.7479856297394165]),
'versicolor&1&67': np.array([0.189773257421942, 0.6552150653012478]),
'versicolor&1&68': np.array([0.40694846236352233, 0.5109051764198169]),
'versicolor&1&69': np.array([0.1390424906594644, 0.7991613016301518]),
'versicolor&1&70': np.array([0.1945777487290197, 0.6743932844312892]),
'versicolor&1&71': np.array([0.415695226122737, 0.5230815102377903]),
'versicolor&1&72': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&73': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&74': np.array([0.13717260713320106, 0.3627779907901665]),
'versicolor&1&75': np.array([0.0, 0.4756207622944677]),
'versicolor&1&76': np.array([0.0, 0.4854334805210761]),
'versicolor&1&77': np.array([0.0, 0.16885577975809635]),
'versicolor&1&78': np.array([0.0, 0.395805885538554]),
'versicolor&1&79': np.array([0.0, 0.2538072707138344]),
'versicolor&1&80': np.array([0.0, 0.4854334805210761]),
'versicolor&1&81': np.array([0.0, 0.7613919530844643]),
'versicolor&1&82': np.array([0.0, 0.6668230985485095]),
'versicolor&1&83': np.array([0.0, 0.4904755652105692]),
'versicolor&1&84': np.array([0.0, 0.8121046082359693]),
'versicolor&1&85': np.array([0.0, 0.6855766903749089]),
'versicolor&1&86': np.array([0.0, 0.5008471974438506]),
'versicolor&1&87': np.array([0.0, 0.16885577975809635]),
'versicolor&1&88': np.array([0.0, 0.16885577975809635]),
'versicolor&1&89': np.array([0.0, 0.395805885538554]),
'versicolor&1&90': np.array([0.37157553889555184, 0.1221600832023858]),
'versicolor&1&91': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&92': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&93': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&94': np.array([0.4964962439921071, 0.3798215458387346]),
'versicolor&1&95': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&96': np.array([0.2805345936193346, 0.6595182922149835]),
'versicolor&1&97': np.array([0.08302493125394889, 0.6186280682763334]),
'versicolor&1&98': np.array([0.22125635302655813, 0.2925832702358638]),
'versicolor&1&99': np.array([0.2365788606456636, 0.7120007179768731]),
'versicolor&1&100': np.array([0.022347126801293967, 0.6718013300441928]),
'versicolor&1&101': np.array([0.10063786451829529, 0.4085974066833644]),
'versicolor&1&102': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&103': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&104': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&105': np.array([-0.32199975656257646, 0.7482293552463756]),
'versicolor&1&106': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&107': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&108': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&109': np.array([0.2619265016777598, 0.33491141590339474]),
'versicolor&1&110': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&111': np.array([0.20183015430619713, 0.7445346002055082]),
'versicolor&1&112': np.array([-0.05987874887638573, 0.6927937290176818]),
'versicolor&1&113': np.array([-0.2562642052727569, 0.6920266972283227]),
'versicolor&1&114': np.array([0.1736438124560164, 0.7898174616442941]),
'versicolor&1&115': np.array([-0.10114089899940126, 0.7326610366533243]),
'versicolor&1&116': np.array([-0.34479806250338163, 0.7789143553916729]),
'versicolor&1&117': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&118': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&119': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&120': np.array([0.8224435822504677, 0.05315271528828394]),
'versicolor&1&121': np.array([0.820222886307464, 0.055413714884152906]),
'versicolor&1&122': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&123': np.array([0.8282924295054531, 0.0752641855714259]),
'versicolor&1&124': np.array([0.8476206690613984, 0.02146454924522743]),
'versicolor&1&125': np.array([0.820222886307464, 0.055413714884152906]),
'versicolor&1&126': np.array([0.69362517791403, 0.2579390890424607]),
'versicolor&1&127': np.array([0.7261791877801502, 0.16248655642013624]),
'versicolor&1&128': np.array([0.8190416077589757, 0.05661509439536992]),
'versicolor&1&129': np.array([0.6654762076749751, 0.2949291633432878]),
'versicolor&1&130': np.array([0.7118161070185614, 0.17683644094125878]),
'versicolor&1&131': np.array([0.8165214253946836, 0.059175619390630096]),
'versicolor&1&132': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&133': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&134': np.array([0.8282924295054531, 0.0752641855714259]),
'versicolor&1&135': np.array([0.5188109114552927, 0.03638964581864269]),
'versicolor&1&136': np.array([0.5131478569192371, 0.04203387599862816]),
'versicolor&1&137': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&138': np.array([0.5965042032375719, 0.48856644624972617]),
'versicolor&1&139': np.array([0.5436097000280874, 0.1461891067488832]),
'versicolor&1&140': np.array([0.5131478569192371, 0.04203387599862816]),
'versicolor&1&141': np.array([0.32513442685780247, 0.6124765483184536]),
'versicolor&1&142': np.array([0.1812883360919208, 0.5504982486874137]),
'versicolor&1&143': np.array([0.4788153032824012, 0.08625929936974323]),
'versicolor&1&144': np.array([0.28490718210609345, 0.6650298146522879]),
'versicolor&1&145': np.array([0.1313204067730033, 0.597079642504441]),
'versicolor&1&146': np.array([0.46583127837967303, 0.09875847161509169]),
'versicolor&1&147': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&148': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&149': np.array([0.5965042032375719, 0.48856644624972617]),
'versicolor&1&150': np.array([0.37157553889555184, 0.1221600832023858]),
'versicolor&1&151': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&152': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&153': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&154': np.array([0.4964962439921071, 0.3798215458387346]),
'versicolor&1&155': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&156': np.array([0.2805345936193346, 0.6595182922149835]),
'versicolor&1&157': np.array([0.08302493125394889, 0.6186280682763334]),
'versicolor&1&158': np.array([0.22125635302655813, 0.2925832702358638]),
'versicolor&1&159': np.array([0.2365788606456636, 0.7120007179768731]),
'versicolor&1&160': np.array([0.022347126801293967, 0.6718013300441928]),
'versicolor&1&161': np.array([0.10063786451829529, 0.4085974066833644]),
'versicolor&1&162': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&163': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&164': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&165': np.array([-0.32199975656257646, 0.7482293552463756]),
'versicolor&1&166': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&167': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&168': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&169': np.array([0.2619265016777598, 0.33491141590339474]),
'versicolor&1&170': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&171': np.array([0.20183015430619713, 0.7445346002055082]),
'versicolor&1&172': np.array([-0.05987874887638573, 0.6927937290176818]),
'versicolor&1&173': np.array([-0.2562642052727569, 0.6920266972283227]),
'versicolor&1&174': np.array([0.1736438124560164, 0.7898174616442941]),
'versicolor&1&175': np.array([-0.10114089899940126, 0.7326610366533243]),
'versicolor&1&176': np.array([-0.34479806250338163, 0.7789143553916729]),
'versicolor&1&177': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&178': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&179': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&180': np.array([0.8224435822504677, 0.05315271528828394]),
'versicolor&1&181': np.array([0.820222886307464, 0.055413714884152906]),
'versicolor&1&182': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&183': np.array([0.8282924295054531, 0.0752641855714259]),
'versicolor&1&184': np.array([0.8476206690613984, 0.02146454924522743]),
'versicolor&1&185': np.array([0.820222886307464, 0.055413714884152906]),
'versicolor&1&186': np.array([0.69362517791403, 0.2579390890424607]),
'versicolor&1&187': np.array([0.7261791877801502, 0.16248655642013624]),
'versicolor&1&188': np.array([0.8190416077589757, 0.05661509439536992]),
'versicolor&1&189': np.array([0.6654762076749751, 0.2949291633432878]),
'versicolor&1&190': np.array([0.7118161070185614, 0.17683644094125878]),
'versicolor&1&191': np.array([0.8165214253946836, 0.059175619390630096]),
'versicolor&1&192': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&193': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&194': np.array([0.8282924295054531, 0.0752641855714259]),
'versicolor&1&195': np.array([0.5188109114552927, 0.03638964581864269]),
'versicolor&1&196': np.array([0.5131478569192371, 0.04203387599862816]),
'versicolor&1&197': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&198': np.array([0.5965042032375719, 0.48856644624972617]),
'versicolor&1&199': np.array([0.5436097000280874, 0.1461891067488832]),
'versicolor&1&200': np.array([0.5131478569192371, 0.04203387599862816]),
'versicolor&1&201': np.array([0.32513442685780247, 0.6124765483184536]),
'versicolor&1&202': np.array([0.1812883360919208, 0.5504982486874137]),
'versicolor&1&203': np.array([0.4788153032824012, 0.08625929936974323]),
'versicolor&1&204': np.array([0.28490718210609345, 0.6650298146522879]),
'versicolor&1&205': np.array([0.1313204067730033, 0.597079642504441]),
'versicolor&1&206': np.array([0.46583127837967303, 0.09875847161509169]),
'versicolor&1&207': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&208': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&209': np.array([0.5965042032375719, 0.48856644624972617]),
'versicolor&1&210': np.array([0.37157553889555184, 0.1221600832023858]),
'versicolor&1&211': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&212': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&213': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&214': np.array([0.4964962439921071, 0.3798215458387346]),
'versicolor&1&215': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&216': np.array([0.2805345936193346, 0.6595182922149835]),
'versicolor&1&217': np.array([0.08302493125394889, 0.6186280682763334]),
'versicolor&1&218': np.array([0.22125635302655813, 0.2925832702358638]),
'versicolor&1&219': np.array([0.2365788606456636, 0.7120007179768731]),
'versicolor&1&220': np.array([0.022347126801293967, 0.6718013300441928]),
'versicolor&1&221': np.array([0.10063786451829529, 0.4085974066833644]),
'versicolor&1&222': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&223': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&224': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&225': np.array([0.6253337666017573, 0.21983620140147825]),
'versicolor&1&226': np.array([0.6178968870349187, 0.22747652768125623]),
'versicolor&1&227': np.array([0.7245803616608639, 0.18141483095066183]),
'versicolor&1&228': np.array([0.6762617119303499, 0.19305674697949574]),
'versicolor&1&229': np.array([0.7182033715159247, 0.0970420677941148]),
'versicolor&1&230': np.array([0.6178968870349187, 0.22747652768125623]),
'versicolor&1&231': np.array([0.4976586558055923, 0.5393318265947251]),
'versicolor&1&232': np.array([0.4361093214026388, 0.4279491486345008]),
'versicolor&1&233': np.array([0.613985959011319, 0.23148898930908424]),
'versicolor&1&234': np.array([0.46747697713468217, 0.586607956360002]),
'versicolor&1&235': np.array([0.41044950174869577, 0.45415985894965977]),
'versicolor&1&236': np.array([0.6057447478066579, 0.23993389556303918]),
'versicolor&1&237': np.array([0.7245803616608639, 0.18141483095066183]),
'versicolor&1&238': np.array([0.7245803616608639, 0.18141483095066183]),
'versicolor&1&239': np.array([0.6762617119303499, 0.19305674697949574]),
'versicolor&1&240': np.array([0.056623968925773045, 0.43360725859686644]),
'versicolor&1&241': np.array([0.020169511418752378, 0.47015948158260334]),
'versicolor&1&242': np.array([0.5806365328450954, 0.47262706807712623]),
'versicolor&1&243': np.array([0.4146290154471569, 0.4964318942067898]),
'versicolor&1&244': np.array([0.3351719071445682, 0.20616862401308342]),
'versicolor&1&245': np.array([0.020169511418752378, 0.47015948158260334]),
'versicolor&1&246': np.array([0.24022705822940116, 0.7185371033867092]),
'versicolor&1&247': np.array([0.010447231513465048, 0.6616528865917504]),
'versicolor&1&248': np.array([0.024556360933646205, 0.4723948285969902]),
'versicolor&1&249': np.array([0.21321406009810842, 0.7648907754638917]),
'versicolor&1&250': np.array([-0.027450681014480036, 0.6999336015080245]),
'versicolor&1&251': np.array([-0.0164329511444131, 0.5132208276383963]),
'versicolor&1&252': np.array([0.5806365328450954, 0.47262706807712623]),
'versicolor&1&253': np.array([0.5806365328450954, 0.47262706807712623]),
'versicolor&1&254': np.array([0.4146290154471569, 0.4964318942067898]),
'versicolor&1&255': np.array([-0.32199975656257646, 0.7482293552463756]),
'versicolor&1&256': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&257': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&258': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&259': np.array([0.2619265016777598, 0.33491141590339474]),
'versicolor&1&260': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&261': np.array([0.20183015430619713, 0.7445346002055082]),
'versicolor&1&262': np.array([-0.05987874887638573, 0.6927937290176818]),
'versicolor&1&263': np.array([-0.2562642052727569, 0.6920266972283227]),
'versicolor&1&264': np.array([0.1736438124560164, 0.7898174616442941]),
'versicolor&1&265': np.array([-0.10114089899940126, 0.7326610366533243]),
'versicolor&1&266': np.array([-0.34479806250338163, 0.7789143553916729]),
'versicolor&1&267': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&268': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&269': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&270': np.array([0.7749499208750119, 0.8147189440804429]),
'versicolor&1&271': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&272': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&273': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&274': np.array([0.4079256832347186, 0.038455640985860955]),
'versicolor&1&275': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&276': np.array([0.18555813792691386, 0.6940923833143309]),
'versicolor&1&277': np.array([0.32639262064172164, 0.6296083447134281]),
'versicolor&1&278': np.array([0.6964303997553315, 0.7444536452136676]),
'versicolor&1&279': np.array([0.18216358701833335, 0.747615101407194]),
'versicolor&1&280': np.array([0.33549445287370383, 0.6526039763053625]),
'versicolor&1&281': np.array([0.7213651642695392, 0.7718874443854203]),
'versicolor&1&282': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&283': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&284': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&285': np.array([0.7749499208750119, 0.8147189440804429]),
'versicolor&1&286': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&287': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&288': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&289': np.array([0.4079256832347186, 0.038455640985860955]),
'versicolor&1&290': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&291': np.array([0.18555813792691386, 0.6940923833143309]),
'versicolor&1&292': np.array([0.32639262064172164, 0.6296083447134281]),
'versicolor&1&293': np.array([0.6964303997553315, 0.7444536452136676]),
'versicolor&1&294': np.array([0.18216358701833335, 0.747615101407194]),
'versicolor&1&295': np.array([0.33549445287370383, 0.6526039763053625]),
'versicolor&1&296': np.array([0.7213651642695392, 0.7718874443854203]),
'versicolor&1&297': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&298': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&299': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&300': np.array([0.4933316375690332, 0.5272416708629276]),
'versicolor&1&301': np.array([0.5041830043657418, 0.5392782673950876]),
'versicolor&1&302': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&303': np.array([0.13717260713320106, 0.3627779907901665]),
'versicolor&1&304': np.array([0.3093950298647913, 0.1140298206733954]),
'versicolor&1&305': np.array([0.5041830043657418, 0.5392782673950876]),
'versicolor&1&306': np.array([0.1413116283690917, 0.7479856297394165]),
'versicolor&1&307': np.array([0.189773257421942, 0.6552150653012478]),
'versicolor&1&308': np.array([0.40694846236352233, 0.5109051764198169]),
'versicolor&1&309': np.array([0.1390424906594644, 0.7991613016301518]),
'versicolor&1&310': np.array([0.1945777487290197, 0.6743932844312892]),
'versicolor&1&311': np.array([0.415695226122737, 0.5230815102377903]),
'versicolor&1&312': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&313': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&314': np.array([0.13717260713320106, 0.3627779907901665]),
'versicolor&2&0': np.array([0.37157691321004915, 0.12216227283618836]),
'versicolor&2&1': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&2': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&3': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&4': np.array([0.4741571944522723, -0.3872697414416878]),
'versicolor&2&5': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&6': np.array([0.68663266357557, -0.6475988779804592]),
'versicolor&2&7': np.array([0.8701760330833639, -0.5914646440996656]),
'versicolor&2&8': np.array([0.6273836195848199, -0.15720981251964872]),
'versicolor&2&9': np.array([0.7292373173099087, -0.6975400952780954]),
'versicolor&2&10': np.array([0.9270035696082471, -0.640582639672401]),
'versicolor&2&11': np.array([0.6863652799597699, -0.21335694415409426]),
'versicolor&2&12': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&13': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&14': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&15': np.array([0.37157691321004915, 0.12216227283618836]),
'versicolor&2&16': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&17': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&18': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&19': np.array([0.4741571944522723, -0.3872697414416878]),
'versicolor&2&20': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&21': np.array([0.68663266357557, -0.6475988779804592]),
'versicolor&2&22': np.array([0.8701760330833639, -0.5914646440996656]),
'versicolor&2&23': np.array([0.6273836195848199, -0.15720981251964872]),
'versicolor&2&24': np.array([0.7292373173099087, -0.6975400952780954]),
'versicolor&2&25': np.array([0.9270035696082471, -0.640582639672401]),
'versicolor&2&26': np.array([0.6863652799597699, -0.21335694415409426]),
'versicolor&2&27': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&28': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&29': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&30': np.array([0.5188517506916897, 0.036358567813067386]),
'versicolor&2&31': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&32': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&33': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&34': np.array([0.5354807894355184, -0.3418054346754283]),
'versicolor&2&35': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&36': np.array([0.5761361484884252, -0.44637460220261904]),
'versicolor&2&37': np.array([0.7268664040181829, -0.40159406680426807]),
'versicolor&2&38': np.array([0.5917672401610737, -0.061499563231173816]),
'versicolor&2&39': np.array([0.5921993039887428, -0.46498571089163954]),
'versicolor&2&40': np.array([0.7470482158282458, -0.4169281153671854]),
'versicolor&2&41': np.array([0.5967658480721675, -0.06546963852548916]),
'versicolor&2&42': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&43': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&44': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&45': np.array([-0.8252668830593566, 0.11450866713130668]),
'versicolor&2&46': np.array([-0.8211795643076095, 0.11869650771610692]),
'versicolor&2&47': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&48': np.array([-0.7640280271176497, -0.19364537761420375]),
'versicolor&2&49': np.array([-0.8735738195653328, -0.046438180466149094]),
'versicolor&2&50': np.array([-0.8211795643076095, 0.11869650771610692]),
'versicolor&2&51': np.array([-0.8470213454017305, -0.0910504504559782]),
'versicolor&2&52': np.array([-0.8783521565540571, 0.01381094589198601]),
'versicolor&2&53': np.array([-0.8388485924434891, 0.09800790238640067]),
'versicolor&2&54': np.array([-0.8495871633670822, -0.08820642363054954]),
'versicolor&2&55': np.array([-0.8784816772224661, 0.017184907022714958]),
'versicolor&2&56': np.array([-0.835455914569297, 0.10189258327760495]),
'versicolor&2&57': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&58': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&59': np.array([-0.7640280271176497, -0.19364537761420375]),
'versicolor&2&60': np.array([-0.5227340800279543, 0.4209267574088147]),
'versicolor&2&61': np.array([-0.5140708637198534, 0.4305361238057349]),
'versicolor&2&62': np.array([-0.2661726847443776, -0.6902916602462779]),
'versicolor&2&63': np.array([-0.2741128763380603, -0.7260889090887469]),
'versicolor&2&64': np.array([-0.6188410763351541, -0.22803625884668638]),
'versicolor&2&65': np.array([-0.5140708637198534, 0.4305361238057349]),
'versicolor&2&66': np.array([-0.56940429361245, -0.3442345437882425]),
'versicolor&2&67': np.array([-0.6452502612229726, -0.04686872432129788]),
'versicolor&2&68': np.array([-0.596973015481227, 0.37395461795328944]),
'versicolor&2&69': np.array([-0.5760086048531655, -0.3353570725513232]),
'versicolor&2&70': np.array([-0.6488228567611906, -0.03186184826812757]),
'versicolor&2&71': np.array([-0.5903420131350324, 0.384224764046184]),
'versicolor&2&72': np.array([-0.2661726847443776, -0.6902916602462779]),
'versicolor&2&73': np.array([-0.2661726847443776, -0.6902916602462779]),
'versicolor&2&74': np.array([-0.2741128763380603, -0.7260889090887469]),
'versicolor&2&75': np.array([0.0, 0.47562425924289314]),
'versicolor&2&76': np.array([0.0, 0.4854368956593117]),
'versicolor&2&77': np.array([0.0, -0.7348263896003956]),
'versicolor&2&78': np.array([0.0, -0.7920887571493729]),
'versicolor&2&79': np.array([0.0, -0.507614207038711]),
'versicolor&2&80': np.array([0.0, 0.4854368956593117]),
'versicolor&2&81': np.array([0.0, -0.3982542883933272]),
'versicolor&2&82': np.array([0.0, -0.08633733326458487]),
'versicolor&2&83': np.array([0.0, 0.4039238345412103]),
'versicolor&2&84': np.array([0.0, -0.38897705551367706]),
'versicolor&2&85': np.array([0.0, -0.06915310813754129]),
'versicolor&2&86': np.array([0.0, 0.41580041887839214]),
'versicolor&2&87': np.array([0.0, -0.7348263896003956]),
'versicolor&2&88': np.array([0.0, -0.7348263896003956]),
'versicolor&2&89': np.array([0.0, -0.7920887571493729]),
'versicolor&2&90': np.array([0.37157691321004915, 0.12216227283618836]),
'versicolor&2&91': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&92': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&93': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&94': np.array([0.4741571944522723, -0.3872697414416878]),
'versicolor&2&95': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&96': np.array([0.68663266357557, -0.6475988779804592]),
'versicolor&2&97': np.array([0.8701760330833639, -0.5914646440996656]),
'versicolor&2&98': np.array([0.6273836195848199, -0.15720981251964872]),
'versicolor&2&99': np.array([0.7292373173099087, -0.6975400952780954]),
'versicolor&2&100': np.array([0.9270035696082471, -0.640582639672401]),
'versicolor&2&101': np.array([0.6863652799597699, -0.21335694415409426]),
'versicolor&2&102': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&103': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&104': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&105': np.array([0.5188517506916897, 0.036358567813067386]),
'versicolor&2&106': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&107': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&108': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&109': np.array([0.5354807894355184, -0.3418054346754283]),
'versicolor&2&110': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&111': np.array([0.5761361484884252, -0.44637460220261904]),
'versicolor&2&112': np.array([0.7268664040181829, -0.40159406680426807]),
'versicolor&2&113': np.array([0.5917672401610737, -0.061499563231173816]),
'versicolor&2&114': np.array([0.5921993039887428, -0.46498571089163954]),
'versicolor&2&115': np.array([0.7470482158282458, -0.4169281153671854]),
'versicolor&2&116': np.array([0.5967658480721675, -0.06546963852548916]),
'versicolor&2&117': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&118': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&119': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&120': np.array([-0.7638917827493686, 0.868015757634957]),
'versicolor&2&121': np.array([-0.8001553485824509, 0.9049358162753539]),
'versicolor&2&122': np.array([-0.26179245521040034, -0.7067672760776678]),
'versicolor&2&123': np.array([-0.14690789675963867, -0.7352367260447958]),
'versicolor&2&124': np.array([-0.32941440381886555, -0.4173178729969913]),
'versicolor&2&125': np.array([-0.8001553485824509, 0.9049358162753539]),
'versicolor&2&126': np.array([-0.18291442454393395, -0.2654898014002494]),
'versicolor&2&127': np.array([-0.5797728557269727, 0.3163189837954924]),
'versicolor&2&128': np.array([-0.7579323596667402, 0.8054136823046655]),
'versicolor&2&129': np.array([-0.1948624323669993, -0.23753953755286383]),
'versicolor&2&130': np.array([-0.6437698977881832, 0.3909540110317858]),
'versicolor&2&131': np.array([-0.7963046521980063, 0.846536369471985]),
'versicolor&2&132': np.array([-0.26179245521040034, -0.7067672760776678]),
'versicolor&2&133': np.array([-0.26179245521040034, -0.7067672760776678]),
'versicolor&2&134': np.array([-0.14690789675963867, -0.7352367260447958]),
'versicolor&2&135': np.array([-0.3219660907491514, 0.7482043503408669]),
'versicolor&2&136': np.array([-0.43839553940476644, 0.8642446918440131]),
'versicolor&2&137': np.array([-0.05474251929945989, -0.7566498134597841]),
'versicolor&2&138': np.array([0.17291299562995102, -0.7651995812779756]),
'versicolor&2&139': np.array([0.2626914501948546, -0.5596191134224637]),
'versicolor&2&140': np.array([-0.43839553940476644, 0.8642446918440131]),
'versicolor&2&141': np.array([0.4734444929420575, -0.6150974537943872]),
'versicolor&2&142': np.array([0.5369392542176313, -0.430867927332838]),
'versicolor&2&143': np.array([-0.19892251970509112, 0.5718543863753405]),
'versicolor&2&144': np.array([0.5071047612208237, -0.6507546896558788]),
'versicolor&2&145': np.array([0.5629877361048359, -0.4485515113017818]),
'versicolor&2&146': np.array([-0.3047657227470458, 0.6788631774846587]),
'versicolor&2&147': np.array([-0.05474251929945989, -0.7566498134597841]),
'versicolor&2&148': np.array([-0.05474251929945989, -0.7566498134597841]),
'versicolor&2&149': np.array([0.17291299562995102, -0.7651995812779756]),
'versicolor&2&150': np.array([0.37157691321004915, 0.12216227283618836]),
'versicolor&2&151': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&152': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&153': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&154': np.array([0.4741571944522723, -0.3872697414416878]),
'versicolor&2&155': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&156': np.array([0.68663266357557, -0.6475988779804592]),
'versicolor&2&157': np.array([0.8701760330833639, -0.5914646440996656]),
'versicolor&2&158': np.array([0.6273836195848199, -0.15720981251964872]),
'versicolor&2&159': np.array([0.7292373173099087, -0.6975400952780954]),
'versicolor&2&160': np.array([0.9270035696082471, -0.640582639672401]),
'versicolor&2&161': np.array([0.6863652799597699, -0.21335694415409426]),
'versicolor&2&162': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&163': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&164': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&165': np.array([0.5188517506916897, 0.036358567813067386]),
'versicolor&2&166': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&167': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&168': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&169': np.array([0.5354807894355184, -0.3418054346754283]),
'versicolor&2&170': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&171': np.array([0.5761361484884252, -0.44637460220261904]),
'versicolor&2&172': np.array([0.7268664040181829, -0.40159406680426807]),
'versicolor&2&173': np.array([0.5917672401610737, -0.061499563231173816]),
'versicolor&2&174': np.array([0.5921993039887428, -0.46498571089163954]),
'versicolor&2&175': np.array([0.7470482158282458, -0.4169281153671854]),
'versicolor&2&176': np.array([0.5967658480721675, -0.06546963852548916]),
'versicolor&2&177': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&178': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&179': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&180': np.array([-0.7638917827493686, 0.868015757634957]),
'versicolor&2&181': np.array([-0.8001553485824509, 0.9049358162753539]),
'versicolor&2&182': np.array([-0.26179245521040034, -0.7067672760776678]),
'versicolor&2&183': np.array([-0.14690789675963867, -0.7352367260447958]),
'versicolor&2&184': np.array([-0.32941440381886555, -0.4173178729969913]),
'versicolor&2&185': np.array([-0.8001553485824509, 0.9049358162753539]),
'versicolor&2&186': np.array([-0.18291442454393395, -0.2654898014002494]),
'versicolor&2&187': np.array([-0.5797728557269727, 0.3163189837954924]),
'versicolor&2&188': np.array([-0.7579323596667402, 0.8054136823046655]),
'versicolor&2&189': np.array([-0.1948624323669993, -0.23753953755286383]),
'versicolor&2&190': np.array([-0.6437698977881832, 0.3909540110317858]),
'versicolor&2&191': np.array([-0.7963046521980063, 0.846536369471985]),
'versicolor&2&192': np.array([-0.26179245521040034, -0.7067672760776678]),
'versicolor&2&193': np.array([-0.26179245521040034, -0.7067672760776678]),
'versicolor&2&194': np.array([-0.14690789675963867, -0.7352367260447958]),
'versicolor&2&195': np.array([-0.3219660907491514, 0.7482043503408669]),
'versicolor&2&196': np.array([-0.43839553940476644, 0.8642446918440131]),
'versicolor&2&197': np.array([-0.05474251929945989, -0.7566498134597841]),
'versicolor&2&198': np.array([0.17291299562995102, -0.7651995812779756]),
'versicolor&2&199': np.array([0.2626914501948546, -0.5596191134224637]),
'versicolor&2&200': np.array([-0.43839553940476644, 0.8642446918440131]),
'versicolor&2&201': np.array([0.4734444929420575, -0.6150974537943872]),
'versicolor&2&202': np.array([0.5369392542176313, -0.430867927332838]),
'versicolor&2&203': np.array([-0.19892251970509112, 0.5718543863753405]),
'versicolor&2&204': np.array([0.5071047612208237, -0.6507546896558788]),
'versicolor&2&205': np.array([0.5629877361048359, -0.4485515113017818]),
'versicolor&2&206': np.array([-0.3047657227470458, 0.6788631774846587]),
'versicolor&2&207': np.array([-0.05474251929945989, -0.7566498134597841]),
'versicolor&2&208': np.array([-0.05474251929945989, -0.7566498134597841]),
'versicolor&2&209': np.array([0.17291299562995102, -0.7651995812779756]),
'versicolor&2&210': np.array([0.37157691321004915, 0.12216227283618836]),
'versicolor&2&211': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&212': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&213': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&214': np.array([0.4741571944522723, -0.3872697414416878]),
'versicolor&2&215': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&216': np.array([0.68663266357557, -0.6475988779804592]),
'versicolor&2&217': np.array([0.8701760330833639, -0.5914646440996656]),
'versicolor&2&218': np.array([0.6273836195848199, -0.15720981251964872]),
'versicolor&2&219': np.array([0.7292373173099087, -0.6975400952780954]),
'versicolor&2&220': np.array([0.9270035696082471, -0.640582639672401]),
'versicolor&2&221': np.array([0.6863652799597699, -0.21335694415409426]),
'versicolor&2&222': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&223': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&224': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&225': np.array([-0.5775629083348267, 0.7118687782288384]),
'versicolor&2&226': np.array([-0.6016445709024666, 0.7366089009875252]),
'versicolor&2&227': np.array([-0.28356111726513855, -0.739741315226852]),
'versicolor&2&228': np.array([-0.0917622729715107, -0.7645776302158537]),
'versicolor&2&229': np.array([-0.25603689955471853, -0.451727980232351]),
'versicolor&2&230': np.array([-0.6016445709024666, 0.7366089009875252]),
'versicolor&2&231': np.array([-0.1269405801024398, -0.34161216844748166]),
'versicolor&2&232': np.array([-0.33176333807327857, 0.09538228407203546]),
'versicolor&2&233': np.array([-0.564696311454556, 0.6421194512020755]),
'versicolor&2&234': np.array([-0.12669523681593967, -0.32786313310034665]),
'versicolor&2&235': np.array([-0.35960845047491363, 0.1335988694092619]),
'versicolor&2&236': np.array([-0.589572650064144, 0.6697478899606418]),
'versicolor&2&237': np.array([-0.28356111726513855, -0.739741315226852]),
'versicolor&2&238': np.array([-0.28356111726513855, -0.739741315226852]),
'versicolor&2&239': np.array([-0.0917622729715107, -0.7645776302158537]),
'versicolor&2&240': np.array([0.05667262840030629, 0.4335746514880877]),
'versicolor&2&241': np.array([0.0202211257171063, 0.470123810164804]),
'versicolor&2&242': np.array([-0.052990507284891984, -0.7625494034929868]),
'versicolor&2&243': np.array([0.22461127196921116, -0.7375780139111495]),
'versicolor&2&244': np.array([0.3463149754241171, -0.5568366400939154]),
'versicolor&2&245': np.array([0.0202211257171063, 0.470123810164804]),
'versicolor&2&246': np.array([0.4022739113634462, -0.4700171786183992]),
'versicolor&2&247': np.array([0.5046771347249378, -0.33609610934748635]),
'versicolor&2&248': np.array([0.1370187510624256, 0.30303755274337163]),
'versicolor&2&249': np.array([0.41683021879255133, -0.4812793747667524]),
'versicolor&2&250': np.array([0.5150371666265885, -0.33852139184639396]),
'versicolor&2&251': np.array([0.10611499646955676, 0.33589829339460586]),
'versicolor&2&252': np.array([-0.052990507284891984, -0.7625494034929868]),
'versicolor&2&253': np.array([-0.052990507284891984, -0.7625494034929868]),
'versicolor&2&254': np.array([0.22461127196921116, -0.7375780139111495]),
'versicolor&2&255': np.array([0.5188517506916897, 0.036358567813067386]),
'versicolor&2&256': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&257': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&258': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&259': np.array([0.5354807894355184, -0.3418054346754283]),
'versicolor&2&260': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&261': np.array([0.5761361484884252, -0.44637460220261904]),
'versicolor&2&262': np.array([0.7268664040181829, -0.40159406680426807]),
'versicolor&2&263': np.array([0.5917672401610737, -0.061499563231173816]),
'versicolor&2&264': np.array([0.5921993039887428, -0.46498571089163954]),
'versicolor&2&265': np.array([0.7470482158282458, -0.4169281153671854]),
'versicolor&2&266': np.array([0.5967658480721675, -0.06546963852548916]),
'versicolor&2&267': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&268': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&269': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&270': np.array([-0.8252668830593566, 0.11450866713130668]),
'versicolor&2&271': np.array([-0.8211795643076095, 0.11869650771610692]),
'versicolor&2&272': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&273': np.array([-0.7640280271176497, -0.19364537761420375]),
'versicolor&2&274': np.array([-0.8735738195653328, -0.046438180466149094]),
'versicolor&2&275': np.array([-0.8211795643076095, 0.11869650771610692]),
'versicolor&2&276': np.array([-0.8470213454017305, -0.0910504504559782]),
'versicolor&2&277': np.array([-0.8783521565540571, 0.01381094589198601]),
'versicolor&2&278': np.array([-0.8388485924434891, 0.09800790238640067]),
'versicolor&2&279': np.array([-0.8495871633670822, -0.08820642363054954]),
'versicolor&2&280': np.array([-0.8784816772224661, 0.017184907022714958]),
'versicolor&2&281': np.array([-0.835455914569297, 0.10189258327760495]),
'versicolor&2&282': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&283': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&284': np.array([-0.7640280271176497, -0.19364537761420375]),
'versicolor&2&285': np.array([-0.8252668830593566, 0.11450866713130668]),
'versicolor&2&286': np.array([-0.8211795643076095, 0.11869650771610692]),
'versicolor&2&287': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&288': np.array([-0.7640280271176497, -0.19364537761420375]),
'versicolor&2&289': np.array([-0.8735738195653328, -0.046438180466149094]),
'versicolor&2&290': np.array([-0.8211795643076095, 0.11869650771610692]),
'versicolor&2&291': np.array([-0.8470213454017305, -0.0910504504559782]),
'versicolor&2&292': np.array([-0.8783521565540571, 0.01381094589198601]),
'versicolor&2&293': np.array([-0.8388485924434891, 0.09800790238640067]),
'versicolor&2&294': np.array([-0.8495871633670822, -0.08820642363054954]),
'versicolor&2&295': np.array([-0.8784816772224661, 0.017184907022714958]),
'versicolor&2&296': np.array([-0.835455914569297, 0.10189258327760495]),
'versicolor&2&297': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&298': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&299': np.array([-0.7640280271176497, -0.19364537761420375]),
'versicolor&2&300': np.array([-0.5227340800279543, 0.4209267574088147]),
'versicolor&2&301': np.array([-0.5140708637198534, 0.4305361238057349]),
'versicolor&2&302': np.array([-0.2661726847443776, -0.6902916602462779]),
'versicolor&2&303': np.array([-0.2741128763380603, -0.7260889090887469]),
'versicolor&2&304': np.array([-0.6188410763351541, -0.22803625884668638]),
'versicolor&2&305': np.array([-0.5140708637198534, 0.4305361238057349]),
'versicolor&2&306': np.array([-0.56940429361245, -0.3442345437882425]),
'versicolor&2&307': np.array([-0.6452502612229726, -0.04686872432129788]),
'versicolor&2&308': np.array([-0.596973015481227, 0.37395461795328944]),
'versicolor&2&309': np.array([-0.5760086048531655, -0.3353570725513232]),
'versicolor&2&310': np.array([-0.6488228567611906, -0.03186184826812757]),
'versicolor&2&311': np.array([-0.5903420131350324, 0.384224764046184]),
'versicolor&2&312': np.array([-0.2661726847443776, -0.6902916602462779]),
'versicolor&2&313': np.array([-0.2661726847443776, -0.6902916602462779]),
'versicolor&2&314': np.array([-0.2741128763380603, -0.7260889090887469]),
'virginica&0&0': np.array([-0.7431524521056113, -0.24432235603856345]),
'virginica&0&1': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&2': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&3': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&4': np.array([-0.9706534384443797, 0.007448195602953232]),
'virginica&0&5': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&6': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&7': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&8': np.array([-0.8486399726113752, -0.13537345771621853]),
'virginica&0&9': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&10': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&11': np.array([-0.7870031444780577, -0.1952404625292782]),
'virginica&0&12': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&13': np.array([-0.9569238464170641, -0.02354905845282574]),
'virginica&0&14': np.array([-0.9677320606992984, -0.012432557482778654]),
'virginica&0&15': np.array([-0.7431524521056113, -0.24432235603856345]),
'virginica&0&16': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&17': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&18': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&19': np.array([-0.9706534384443797, 0.007448195602953232]),
'virginica&0&20': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&21': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&22': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&23': np.array([-0.8486399726113752, -0.13537345771621853]),
'virginica&0&24': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&25': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&26': np.array([-0.7870031444780577, -0.1952404625292782]),
'virginica&0&27': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&28': np.array([-0.9569238464170641, -0.02354905845282574]),
'virginica&0&29': np.array([-0.9677320606992984, -0.012432557482778654]),
'virginica&0&30': np.array([-0.19685199412911655, -0.7845879230594393]),
'virginica&0&31': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&32': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&33': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&34': np.array([-0.7974072911132788, 0.006894018772033604]),
'virginica&0&35': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&36': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&37': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&38': np.array([-0.3355030348883163, -0.6305271339971502]),
'virginica&0&39': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&40': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&41': np.array([-0.2519677855687844, -0.7134447168661863]),
'virginica&0&42': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&43': np.array([-0.7799744386472778, -0.026476616324402506]),
'virginica&0&44': np.array([-0.7942342242967624, -0.0119572163963601]),
'virginica&0&45': np.array([-0.05031696218434577, -0.929227611211748]),
'virginica&0&46': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&47': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&48': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&49': np.array([-0.4656481363306145, 0.007982539480288167]),
'virginica&0&50': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&51': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&52': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&53': np.array([-0.14241819268815753, -0.8424615476000691]),
'virginica&0&54': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&55': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&56': np.array([-0.1140907502997574, -0.8737800276630269]),
'virginica&0&57': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&58': np.array([-0.14198277461566922, -0.4577720226157396]),
'virginica&0&59': np.array([-0.4385442121294165, -0.05333645823279597]),
'virginica&0&60': np.array([0.029402442458921384, -0.9481684282717414]),
'virginica&0&61': np.array([0.009887859354111524, -0.9698143912008228]),
'virginica&0&62': np.array([0.009595083643662688, -0.5643652067423869]),
'virginica&0&63': np.array([0.13694026920485936, -0.36331091829858003]),
'virginica&0&64': np.array([0.3094460464703627, 0.11400643817329122]),
'virginica&0&65': np.array([0.009887859354111524, -0.9698143912008228]),
'virginica&0&66': np.array([0.009595083643662688, -0.5643652067423869]),
'virginica&0&67': np.array([0.13694026920485936, -0.36331091829858003]),
'virginica&0&68': np.array([0.19002455311770447, -0.8848597943731074]),
'virginica&0&69': np.array([0.009595083643662688, -0.5643652067423869]),
'virginica&0&70': np.array([0.13694026920485936, -0.36331091829858003]),
'virginica&0&71': np.array([0.1746467870122951, -0.9073062742839755]),
'virginica&0&72': np.array([0.13694026920485936, -0.36331091829858003]),
'virginica&0&73': np.array([0.11200181312407695, -0.5330612470996793]),
'virginica&0&74': np.array([0.19998284600732558, -0.3489062419702088]),
'virginica&0&75': np.array([0.0, -0.95124502153736]),
'virginica&0&76': np.array([0.0, -0.9708703761803881]),
'virginica&0&77': np.array([0.0, -0.5659706098422994]),
'virginica&0&78': np.array([0.0, -0.3962828716108186]),
'virginica&0&79': np.array([0.0, 0.2538069363248767]),
'virginica&0&80': np.array([0.0, -0.9708703761803881]),
'virginica&0&81': np.array([0.0, -0.5659706098422994]),
'virginica&0&82': np.array([0.0, -0.3962828716108186]),
'virginica&0&83': np.array([0.0, -0.8943993997517804]),
'virginica&0&84': np.array([0.0, -0.5659706098422994]),
'virginica&0&85': np.array([0.0, -0.3962828716108186]),
'virginica&0&86': np.array([0.0, -0.9166476163222441]),
'virginica&0&87': np.array([0.0, -0.3962828716108186]),
'virginica&0&88': np.array([0.0, -0.5466925844560601]),
'virginica&0&89': np.array([0.0, -0.38529908946531777]),
'virginica&0&90': np.array([-0.7431524521056113, -0.24432235603856345]),
'virginica&0&91': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&92': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&93': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&94': np.array([-0.9706534384443797, 0.007448195602953232]),
'virginica&0&95': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&96': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&97': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&98': np.array([-0.8486399726113752, -0.13537345771621853]),
'virginica&0&99': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&100': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&101': np.array([-0.7870031444780577, -0.1952404625292782]),
'virginica&0&102': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&103': np.array([-0.9569238464170641, -0.02354905845282574]),
'virginica&0&104': np.array([-0.9677320606992984, -0.012432557482778654]),
'virginica&0&105': np.array([-0.19685199412911655, -0.7845879230594393]),
'virginica&0&106': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&107': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&108': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&109': np.array([-0.7974072911132788, 0.006894018772033604]),
'virginica&0&110': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&111': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&112': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&113': np.array([-0.3355030348883163, -0.6305271339971502]),
'virginica&0&114': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&115': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&116': np.array([-0.2519677855687844, -0.7134447168661863]),
'virginica&0&117': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&118': np.array([-0.7799744386472778, -0.026476616324402506]),
'virginica&0&119': np.array([-0.7942342242967624, -0.0119572163963601]),
'virginica&0&120': np.array([-0.05031696218434577, -0.929227611211748]),
'virginica&0&121': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&122': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&123': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&124': np.array([-0.4656481363306145, 0.007982539480288167]),
'virginica&0&125': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&126': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&127': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&128': np.array([-0.14241819268815753, -0.8424615476000691]),
'virginica&0&129': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&130': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&131': np.array([-0.1140907502997574, -0.8737800276630269]),
'virginica&0&132': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&133': np.array([-0.14198277461566922, -0.4577720226157396]),
'virginica&0&134': np.array([-0.4385442121294165, -0.05333645823279597]),
'virginica&0&135': np.array([-0.19684482070614498, -0.7845939961595055]),
'virginica&0&136': np.array([-0.07475231751447156, -0.9062785678426409]),
'virginica&0&137': np.array([-0.6782037543706109, -0.29560073676989834]),
'virginica&0&138': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&139': np.array([-0.8063011502229427, 0.4134300066735808]),
'virginica&0&140': np.array([-0.07475231751447156, -0.9062785678426409]),
'virginica&0&141': np.array([-0.6782037543706109, -0.29560073676989834]),
'virginica&0&142': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&143': np.array([-0.2798927835773098, -0.6581136857450849]),
'virginica&0&144': np.array([-0.6782037543706109, -0.29560073676989834]),
'virginica&0&145': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&146': np.array([-0.16106555563262584, -0.777621649099753]),
'virginica&0&147': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&148': np.array([-0.6898990333725056, -0.2534947697713122]),
'virginica&0&149': np.array([-0.769491694075929, -0.22884642137519118]),
'virginica&0&150': np.array([-0.7431524521056113, -0.24432235603856345]),
'virginica&0&151': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&152': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&153': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&154': np.array([-0.9706534384443797, 0.007448195602953232]),
'virginica&0&155': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&156': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&157': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&158': np.array([-0.8486399726113752, -0.13537345771621853]),
'virginica&0&159': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&160': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&161': np.array([-0.7870031444780577, -0.1952404625292782]),
'virginica&0&162': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&163': np.array([-0.9569238464170641, -0.02354905845282574]),
'virginica&0&164': np.array([-0.9677320606992984, -0.012432557482778654]),
'virginica&0&165': np.array([-0.19685199412911655, -0.7845879230594393]),
'virginica&0&166': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&167': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&168': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&169': np.array([-0.7974072911132788, 0.006894018772033604]),
'virginica&0&170': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&171': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&172': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&173': np.array([-0.3355030348883163, -0.6305271339971502]),
'virginica&0&174': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&175': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&176': np.array([-0.2519677855687844, -0.7134447168661863]),
'virginica&0&177': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&178': np.array([-0.7799744386472778, -0.026476616324402506]),
'virginica&0&179': np.array([-0.7942342242967624, -0.0119572163963601]),
'virginica&0&180': np.array([-0.05031696218434577, -0.929227611211748]),
'virginica&0&181': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&182': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&183': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&184': np.array([-0.4656481363306145, 0.007982539480288167]),
'virginica&0&185': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&186': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&187': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&188': np.array([-0.14241819268815753, -0.8424615476000691]),
'virginica&0&189': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&190': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&191': np.array([-0.1140907502997574, -0.8737800276630269]),
'virginica&0&192': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&193': np.array([-0.14198277461566922, -0.4577720226157396]),
'virginica&0&194': np.array([-0.4385442121294165, -0.05333645823279597]),
'virginica&0&195': np.array([-0.19684482070614498, -0.7845939961595055]),
'virginica&0&196': np.array([-0.07475231751447156, -0.9062785678426409]),
'virginica&0&197': np.array([-0.6782037543706109, -0.29560073676989834]),
'virginica&0&198': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&199': np.array([-0.8063011502229427, 0.4134300066735808]),
'virginica&0&200': np.array([-0.07475231751447156, -0.9062785678426409]),
'virginica&0&201': np.array([-0.6782037543706109, -0.29560073676989834]),
'virginica&0&202': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&203': np.array([-0.2798927835773098, -0.6581136857450849]),
'virginica&0&204': np.array([-0.6782037543706109, -0.29560073676989834]),
'virginica&0&205': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&206': np.array([-0.16106555563262584, -0.777621649099753]),
'virginica&0&207': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&208': np.array([-0.6898990333725056, -0.2534947697713122]),
'virginica&0&209': np.array([-0.769491694075929, -0.22884642137519118]),
'virginica&0&210': np.array([-0.7431524521056113, -0.24432235603856345]),
'virginica&0&211': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&212': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&213': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&214': np.array([-0.9706534384443797, 0.007448195602953232]),
'virginica&0&215': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&216': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&217': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&218': np.array([-0.8486399726113752, -0.13537345771621853]),
'virginica&0&219': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&220': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&221': np.array([-0.7870031444780577, -0.1952404625292782]),
'virginica&0&222': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&223': np.array([-0.9569238464170641, -0.02354905845282574]),
'virginica&0&224': np.array([-0.9677320606992984, -0.012432557482778654]),
'virginica&0&225': np.array([-0.05031696218434577, -0.929227611211748]),
'virginica&0&226': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&227': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&228': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&229': np.array([-0.4656481363306145, 0.007982539480288167]),
'virginica&0&230': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&231': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&232': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&233': np.array([-0.14241819268815753, -0.8424615476000691]),
'virginica&0&234': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&235': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&236': np.array([-0.1140907502997574, -0.8737800276630269]),
'virginica&0&237': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&238': np.array([-0.14198277461566922, -0.4577720226157396]),
'virginica&0&239': np.array([-0.4385442121294165, -0.05333645823279597]),
'virginica&0&240': np.array([-0.11329659732608087, -0.8671819100849522]),
'virginica&0&241': np.array([-0.040390637135858574, -0.9402832917474078]),
'virginica&0&242': np.array([-0.5276460255602035, -0.28992233541586077]),
'virginica&0&243': np.array([-0.6392402874163683, -0.24114611970435948]),
'virginica&0&244': np.array([-0.6814868825686854, 0.35066801608083215]),
'virginica&0&245': np.array([-0.040390637135858574, -0.9402832917474078]),
'virginica&0&246': np.array([-0.5276460255602035, -0.28992233541586077]),
'virginica&0&247': np.array([-0.6392402874163683, -0.24114611970435948]),
'virginica&0&248': np.array([-0.16157511199607094, -0.7754323813403634]),
'virginica&0&249': np.array([-0.5276460255602035, -0.28992233541586077]),
'virginica&0&250': np.array([-0.6392402874163683, -0.24114611970435948]),
'virginica&0&251': np.array([-0.08968204532514226, -0.8491191210330045]),
'virginica&0&252': np.array([-0.6392402874163683, -0.24114611970435948]),
'virginica&0&253': np.array([-0.544626974647221, -0.24972982107967573]),
'virginica&0&254': np.array([-0.6426355680762406, -0.20016519137103667]),
'virginica&0&255': np.array([-0.19685199412911655, -0.7845879230594393]),
'virginica&0&256': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&257': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&258': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&259': np.array([-0.7974072911132788, 0.006894018772033604]),
'virginica&0&260': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&261': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&262': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&263': np.array([-0.3355030348883163, -0.6305271339971502]),
'virginica&0&264': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&265': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&266': np.array([-0.2519677855687844, -0.7134447168661863]),
'virginica&0&267': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&268': np.array([-0.7799744386472778, -0.026476616324402506]),
'virginica&0&269': np.array([-0.7942342242967624, -0.0119572163963601]),
'virginica&0&270': np.array([-0.04201361383207032, -0.9372571358382161]),
'virginica&0&271': np.array([-0.014237661899709955, -0.9660323357290304]),
'virginica&0&272': np.array([-0.04813346258022244, -0.5416229439456887]),
'virginica&0&273': np.array([-0.3109532939139045, -0.22759134703604383]),
'virginica&0&274': np.array([-0.4167677904879879, 0.22207334821665425]),
import numpy as np
import pandas as pd
import mxnet as mx
import logging
# set to 0 to train on all available data
VALIDATION_SIZE = 1000
# create the training
dataset = pd.read_csv("./train.csv")
target = dataset[[0]].values.ravel()
train = dataset.iloc[:,1:].values
def get_lenet():
data = mx.symbol.Variable('data')
# first conv
conv1 = mx.symbol.Convolution(data=data, kernel=(5,5), num_filter=20)
tanh1 = mx.symbol.Activation(data=conv1, act_type="tanh")
pool1 = mx.symbol.Pooling(data=tanh1, pool_type="max",
kernel=(3,3), stride=(2,2))
# second conv
conv2 = mx.symbol.Convolution(data=pool1, kernel=(3,3), num_filter=50)
tanh2 = mx.symbol.Activation(data=conv2, act_type="tanh")
pool2 = mx.symbol.Pooling(data=tanh2, pool_type="max",
kernel=(3,3), stride=(2,2))
# first fullc
flatten = mx.symbol.Flatten(data=pool2)
fc1 = mx.symbol.FullyConnected(data=flatten, num_hidden=500)
tanh3 = mx.symbol.Activation(data=fc1, act_type="tanh")
# second fullc
fc2 = mx.symbol.FullyConnected(data=tanh3, num_hidden=10)
# loss
lenet = mx.symbol.SoftmaxOutput(data=fc2, name='softmax')
return lenet
# split dataset
val_data = train[:VALIDATION_SIZE].astype('float32')
val_label = target[:VALIDATION_SIZE]
train_data = train[VALIDATION_SIZE: , :].astype('float32')
train_label = target[VALIDATION_SIZE:]
train_data = np.array(train_data)
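# --- Illustrative sketch (not part of the original snippet) -------------------
# One way the LeNet symbol above could be trained with the classic MXNet Module
# API; batch size, learning rate, epoch count and the /255 normalization are
# assumptions made for this example only.
logging.getLogger().setLevel(logging.DEBUG)
batch_size = 64
train_iter = mx.io.NDArrayIter(train_data.reshape((-1, 1, 28, 28)) / 255.0,
                               train_label, batch_size=batch_size, shuffle=True)
val_iter = mx.io.NDArrayIter(val_data.reshape((-1, 1, 28, 28)) / 255.0,
                             val_label, batch_size=batch_size)
lenet_model = mx.mod.Module(symbol=get_lenet(), context=mx.cpu())
lenet_model.fit(train_iter,
                eval_data=val_iter,
                optimizer='sgd',
                optimizer_params={'learning_rate': 0.1},
                eval_metric='acc',
                num_epoch=10,
                batch_end_callback=mx.callback.Speedometer(batch_size, 100))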
import numpy as np
from gym import spaces
from agents import SimpleAgentClass
# Create agents for the CMA-ES, NEAT and WANN agents
# defined in the weight-agnostic paper repo:
# https://github.com/google/brain-tokyo-workshop/tree/master/WANNRelease/
# -------------------------------------------------------------------
# Here begins copy/paste from WANNRelease code linked above
def weightedRandom(weights):
"""Returns random index, with each choices chance weighted
Args:
weights - (np_array) - weighting of each choice
[N X 1]
Returns:
i - (int) - chosen index
"""
minVal = np.min(weights)
weights = weights - minVal # handle negative vals
cumVal = np.cumsum(weights)
pick = np.random.uniform(0, cumVal[-1])
for i in range(len(weights)):
if cumVal[i] >= pick:
return i
def selectAct(action, actSelect):
"""Selects action based on vector of actions
Single Action:
- Hard: a single action is chosen based on the highest index
- Prob: a single action is chosen probablistically with higher values
more likely to be chosen
We aren't selecting a single action:
- Softmax: a softmax normalized distribution of values is returned
- Default: all actions are returned
Args:
action - (np_array) - vector weighting each possible action
[N X 1]
Returns:
i - (int) or (np_array) - chosen index
[N X 1]
"""
if actSelect == 'softmax':
action = softmax(action)
elif actSelect == 'prob':
action = weightedRandom(np.sum(action,axis=0))
else:
action = action.flatten()
return action
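# Minimal stand-in for the `softmax` helper referenced by selectAct above; the
# original definition is not included in this excerpt, so this is an assumption.
def softmax(x):
    """Numerically stable softmax along axis 0 (covers 1-D action vectors)."""
    x = np.asarray(x, dtype=float)
    e_x = np.exp(x - np.max(x))
    return e_x / e_x.sum(axis=0)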
def act(weights, aVec, nInput, nOutput, inPattern):
"""Returns FFANN output given a single input pattern
If the variable weights is a vector it is turned into a square weight matrix.
Allows the network to return the result of several samples at once if given a matrix instead of a vector of inputs:
Dim 0 : individual samples
Dim 1 : dimensionality of pattern (# of inputs)
Args:
weights - (np_array) - ordered weight matrix or vector
[N X N] or [N**2]
aVec - (np_array) - activation function of each node
[N X 1] - stored as ints (see applyAct in ann.py)
nInput - (int) - number of input nodes
nOutput - (int) - number of output nodes
inPattern - (np_array) - input activation
[1 X nInput] or [nSamples X nInput]
Returns:
output - (np_array) - output activation
[1 X nOutput] or [nSamples X nOutput]
"""
# Turn weight vector into weight matrix
if np.ndim(weights) < 2:
nNodes = int(np.sqrt(np.shape(weights)[0]))
wMat = np.reshape(weights, (nNodes, nNodes))
else:
nNodes = np.shape(weights)[0]
wMat = weights
wMat[np.isnan(wMat)]=0
# Vectorize input
if np.ndim(inPattern) > 1:
nSamples = np.shape(inPattern)[0]
else:
nSamples = 1
# Run input pattern through ANN
nodeAct = np.zeros((nSamples,nNodes))
nodeAct[:,0] = 1 # Bias activation
nodeAct[:,1:nInput+1] = inPattern
# Propagate signal through hidden to output nodes
iNode = nInput+1
for iNode in range(nInput+1,nNodes):
rawAct = np.dot(nodeAct, wMat[:,iNode]).squeeze()
nodeAct[:,iNode] = applyAct(aVec[iNode], rawAct)
#print(nodeAct)
output = nodeAct[:,-nOutput:]
return output
def applyAct(actId, x):
"""Returns value after an activation function is applied
Lookup table to allow activations to be stored in numpy arrays
case 1 -- Linear
case 2 -- Unsigned Step Function
case 3 -- Sin
    case 4 -- Gaussian with mean 0 and sigma 1
case 5 -- Hyperbolic Tangent [tanh] (signed)
case 6 -- Sigmoid unsigned [1 / (1 + exp(-x))]
case 7 -- Inverse
case 8 -- Absolute Value
case 9 -- Relu
case 10 -- Cosine
case 11 -- Squared
Args:
actId - (int) - key to look up table
x - (???) - value to be input into activation
[? X ?] - any type or dimensionality
Returns:
output - (float) - value after activation is applied
[? X ?] - same dimensionality as input
"""
if actId == 1: # Linear
value = x
if actId == 2: # Unsigned Step Function
value = 1.0*(x>0.0)
#value = (np.tanh(50*x/2.0) + 1.0)/2.0
elif actId == 3: # Sin
value = np.sin(np.pi*x)
elif actId == 4: # Gaussian with mean 0 and sigma 1
value = np.exp(-np.multiply(x, x) / 2.0)
elif actId == 5: # Hyperbolic Tangent (signed)
value = np.tanh(x)
elif actId == 6: # Sigmoid (unsigned)
        value = (np.tanh(x/2.0) + 1.0) / 2.0
    elif actId == 7: # Inverse
        value = -x
    elif actId == 8: # Absolute Value
        value = abs(x)
    elif actId == 9: # Relu
        value = np.maximum(0, x)
    elif actId == 10: # Cosine
        value = np.cos(np.pi*x)
    elif actId == 11: # Squared
        value = x**2
    else:
        value = x
    return value
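# --- Illustrative usage sketch (not part of the original file) ----------------
# Evaluate a tiny 4-node feed-forward net (bias, one input, one tanh hidden
# node, one linear output) with `act`. All weights and activation ids below are
# made-up values chosen only to show the calling convention.
if __name__ == "__main__":
    _nNodes = 4
    _wVec = np.zeros(_nNodes * _nNodes)
    _wVec[1 * _nNodes + 2] = 0.5             # input  -> hidden
    _wVec[2 * _nNodes + 3] = 1.0             # hidden -> output
    _aVec = np.array([1, 1, 5, 1])           # node 2 uses tanh (id 5)
    print(act(_wVec, _aVec, nInput=1, nOutput=1,
              inPattern=np.array([[0.7]])))  # roughly [[0.336]]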
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import glob
import imageio
import numpy as np
import SimpleITK as sitk
import transformations as T
import tensorflow as tf
import sys
sys.path.append('/vol/medic01/users/bh1511/_build/geomstats-farrell/')
from geomstats.special_orthogonal_group import SpecialOrthogonalGroup
###############################################################################
# Tensorflow feature wrapper
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _float_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
###############################################################################
# Dataset Locations
NIFTI_ROOT = '/vol/medic01/users/bh1511/DATA_RAW/AliceBrainReconsAligned/test/'
SAVE_DIR = ''
n_rotations = 2000
n_offsets = 16
max_z = 40
###############################################################################
# Data Generation
SO3_GROUP = SpecialOrthogonalGroup(3)
database = glob.glob(NIFTI_ROOT+'/*.nii.gz')
for fetal_brain in database:
print('Parsing:', fetal_brain)
fixed_image_sitk_tmp = sitk.ReadImage(fetal_brain, sitk.sitkFloat32)
fixed_image_sitk = sitk.GetImageFromArray(sitk.GetArrayFromImage(fixed_image_sitk_tmp))
fixed_image_sitk = sitk.RescaleIntensity(fixed_image_sitk, 0, 1)
writer = tf.python_io.TFRecordWriter(SAVE_DIR +
os.path.basename(fetal_brain).replace('.nii.gz','.tfrecord'))
    rotations = np.pi * np.random.rand(n_rotations, 3)
import cv2
import utils
import numpy as np
from functools import partial
def edge_detection_grad_callback(
val, img: np.ndarray, win_name: str
):
stdevX10 = cv2.getTrackbarPos('stdev', win_name)
threshold = cv2.getTrackbarPos('threshold', win_name)
alpha1 = cv2.getTrackbarPos('alpha1', win_name)
alpha0 = cv2.getTrackbarPos('alpha0', win_name)
global img_edges
if stdevX10 > 0:
img_edges = cv2.GaussianBlur(img, (0, 0), stdevX10/10., stdevX10/10.)
else:
img_edges = img.copy()
img_dx = cv2.Sobel(img_edges, cv2.CV_32F, 1, 0)
img_dy = cv2.Sobel(img_edges, cv2.CV_32F, 0, 1)
mag, angle = cv2.cartToPolar(img_dx, img_dy, angleInDegrees=True)
temp = np.where(angle >= alpha0, 255, 0)
temp2 = np.where(angle <= alpha1, 255, 0)
temp = np.where((temp + temp2) != 0, 255, 0)
temp2 = np.where(mag > threshold, 255, 0)
img_edges = np.where((temp * temp2) != 0, 255, 0).astype('uint8')
cv2.imshow(win_name, img_edges)
def hough_callback(val, img: np.ndarray, win_name: str):
drho = cv2.getTrackbarPos('drho', win_name)
dtheta = cv2.getTrackbarPos('dtheta', win_name)
accum = cv2.getTrackbarPos('accum', win_name)
n = cv2.getTrackbarPos('n', win_name)
if drho <= 0:
return
if dtheta <= 0:
return
if accum <= 0:
return
img_copy = img.copy()
lines = cv2.HoughLines(img_edges.astype('uint8'), drho, dtheta/180.0, accum)
n = n if n < len(lines) else len(lines)
for [[rho, theta]] in lines[:n]:
        if (theta < (np.pi / 4.)) or (theta > 3. * np.pi / 4.):
pt1 = (int(rho / np.cos(theta)), 0)
pt2 = (
                int((rho - img_copy.shape[0] * np.sin(theta)) / np.cos(theta)),
                img_copy.shape[0])
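        else:
            # (sketch, not in the original excerpt) near-horizontal lines:
            # solve x*cos(theta) + y*sin(theta) = rho for y at x = 0 and x = width
            pt1 = (0, int(rho / np.sin(theta)))
            pt2 = (img_copy.shape[1],
                   int((rho - img_copy.shape[1] * np.cos(theta)) / np.sin(theta)))
        cv2.line(img_copy, pt1, pt2, (0, 0, 255), 2)
    cv2.imshow(win_name, img_copy)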
import numpy as np
import pandas as pd
def subset_grm(grm, grm_indiv, target_indiv):
set_target_indiv = set(target_indiv)
    isin = np.array([ g in set_target_indiv for g in grm_indiv ])
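    # (sketch, not in the original excerpt) the boolean mask computed above would
    # typically be used to subset both the GRM matrix and the individual list:
    grm_subset = grm[isin, :][:, isin]
    indiv_subset = list(np.array(grm_indiv)[isin])
    return grm_subset, indiv_subset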
# Licensed under an MIT open source license - see LICENSE
import numpy as np
import pytest
from .. import Dendrogram, periodic_neighbours, Structure
class Test2DimensionalData(object):
def test_dendrogramWithNan(self):
n = np.nan
data = np.array([[n, n, n, n, n, n, n, n],
[n, 4, n, n, n, n, n, n],
[n, n, n, 1, n, n, 0, 5],
[3, n, n, 2, 3, 2, 0, n]])
d = Dendrogram.compute(data)
########################################
# Check the trunk elements:
leaves = [structure for structure in d.trunk if structure.is_leaf]
branches = [structure for structure in d.trunk if structure not in leaves]
assert len(leaves) == 2, "We expect two leaves among the lowest structures (the trunk)"
assert len(branches) == 1, "We expect one branch among the lowest structures (the trunk)"
for leaf in leaves:
assert len(leaf.values(subtree=False)) == 1, "Leaves in the trunk are only expected to contain one point"
assert leaf.parent is None
assert leaf.ancestor == leaf
assert leaf.get_npix() == 1
if leaf.values(subtree=False)[0] == 4:
assert list(zip(*leaf.indices(subtree=False)))[0] == (1, 1)
elif leaf.values(subtree=False)[0] == 3:
assert list(zip(*leaf.indices(subtree=False)))[0] == (3, 0)
else:
self.fail("Invalid value of flux in one of the leaves")
########################################
# Check properties of the branch:
branch = branches[0]
assert branch.parent is None
assert branch.ancestor == branch
assert branch.get_npix(subtree=False) == 1 # only pixel is a 0
assert branch.get_npix(subtree=True) == 7
assert len(branch.children) == 2
for leaf in branch.children:
assert leaf.is_leaf
assert leaf.ancestor == branch
assert leaf.parent == branch
if 5 in leaf.values(subtree=False):
assert sum(leaf.values(subtree=False)) == 5
elif 3 in leaf.values(subtree=False):
assert sum(leaf.values(subtree=False)) == 1 + 2 + 3 + 2
else:
self.fail("Invalid child of the branch")
def test_mergeLevelAndHeight(self):
n = np.nan
data = np.array([[n, n, n, n, n, ],
[n, 4, 2, 5, n, ],
[n, n, n, n, 0, ]])
d = Dendrogram.compute(data)
branch, leaf4, leaf5 = d.trunk[0], d.structure_at((1, 1)), d.structure_at((1, 3))
assert leaf4.height == 4.
assert leaf5.height == 5.
assert branch.height == 4.
def test_dendrogramWithConstBackground(self):
# Test a highly artificial array containing a lot of equal pixels
data = np.array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 3, 5, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 2, 3, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 3, 4, 3, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 2, 3, 2, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 2, 3, 2, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 3, 4, 3, 1, 1, 2, 2, 1, 1, 1],
[1, 1, 1, 1, 2, 3, 2, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ])
d = Dendrogram.compute(data)
assert len(d) <= 7
# Some of the '1' valued pixels get included with the leaves and branches,
# hence number of structures is currently 7 and not 6 as expected.
# Fixing this is probably more trouble than it's worth.
leaf_with_twos = d.structure_at((10, 9))
assert leaf_with_twos.height == 2
# Check that all structures contain a reference to the dendrogram
for structure in d:
assert structure._dendrogram is d
class Test3DimensionalData(object):
def setup_method(self, method):
from ._testdata import data
self.data = data
def test_dendrogramComputation(self):
d = Dendrogram.compute(self.data, min_npix=8, min_delta=0.3, min_value=1.4)
# This data with these parameters should produce 55 leaves
assert len(d.leaves) == 55
# Now check every pixel in the data cube (this takes a while).
        st_map = -np.ones(self.data.shape, dtype=int)
for st in d.all_structures:
st_map[st.indices(subtree=False)] = st.idx
#check that vmin/vmax/peak are correct
for st in d.all_structures:
assert st.vmin == self.data[st.indices(subtree=False)].min()
assert st.vmax == self.data[st.indices(subtree=False)].max()
pk_exp = self.data[st.indices(subtree=True)].max()
ind, pk = st.get_peak(subtree=True)
assert self.data[ind] == pk
assert pk_exp == pk
# The "right" way to do this is loop through indices,
# and repeatedly call structure_at(). However, this is quite slow
# structure_at is a thin wrapper around index_map,
# and we compare index_map to st_map instead
np.testing.assert_array_equal(st_map, d.index_map)
# here, we test a few values of structure_at
for coord in np.indices(self.data.shape).reshape(self.data.ndim, np.prod(self.data.shape)).transpose()[::100]:
coord = tuple(coord)
f = self.data[coord]
structure = d.structure_at(coord)
if structure is not None:
assert structure.idx == st_map[coord], "Pixel at {0} is claimed to be part of {1}, but that structure does not contain the coordinate {0}!".format(coord, structure)
else:
assert st_map[coord] == -1
class TestNDimensionalData(object):
def test_4dim(self):
" Test 4-dimensional data "
data = np.zeros((5, 5, 5, 5)) # Create a 5x5x5x5 array initialized to zero
# N-dimensional data is hard to conceptualize so I've kept this simple.
# Create a local maximum (value 5) at the centre
data[2, 2, 2, 2] = 5
# add some points around it with value 3. Note that '1:4:2' is equivalent to saying indices '1' and '3'
data[2, 1:4:2, 2, 2] = data[2, 2, 1:4:2, 2] = data[2, 2, 2, 1:4:2] = 3
# Add a trail of points of value 2 connecting one of those 3s to a 4
data[0:3, 0, 2, 2] = 2 # Sets [0, 0, 2, 2], [1, 0, 2, 2], and [2, 0, 2, 2] all equal to 2 -> will connect to the '3' at [2, 1, 2, 2]
data[0, 0, 2, 1] = 4
# Now dendrogram it:
d = Dendrogram.compute(data, min_value=1)
# We expect two leaves:
leaves = d.leaves
assert len(leaves) == 2
# We expect one branch:
branches = [i for i in d.all_structures if i.is_branch]
assert len(branches) == 1
assert len(d.trunk) == 1
assert d.trunk[0] == branches[0]
# The maxima of each leaf should be at [2,2,2,2] and [0,3,2,1]
for leaf in leaves:
assert leaf.get_peak() in (((2, 2, 2, 2), 5.), ((0, 0, 2, 1), 4.))
assert leaves[0].get_peak() != leaves[1].get_peak()
# Check out a few more properties of the leaf around the global maximum:
leaf = d.structure_at((2, 2, 2, 2))
assert leaf.vmax == 5
assert leaf.vmin == 2
assert leaf.get_npix() == 1 + 6 + 2 # Contains 1x '5', 6x '3', and 2x '2'. The other '2' should be in the branch
# Check that the only pixel in the branch is a '2' at [0,0,2,2]
assert (list(zip(*branches[0].indices(subtree=False))), branches[0].values(subtree=False)) == ([(0, 0, 2, 2), ], [2., ])
def test_periodic():
x = np.array([[0, 0, 0, 0, 0, ],
[1, 1, 0, 1, 1],
[0, 0, 0, 0, 0]])
d = Dendrogram.compute(x, min_value=0.5,
neighbours=periodic_neighbours(1))
expected = np.array([[-1, -1, -1, -1, -1],
[0, 0, -1, 0, 0],
[-1, -1, -1, -1, -1]])
    np.testing.assert_array_equal(d.index_map, expected)
#!/usr/bin/env python
# coding: utf-8
# # Mask R-CNN Demo
#
# A quick intro to using the pre-trained model to detect and segment objects.
import os
import keras
import sys
import random
import math
import numpy as np
import skimage.io
import matplotlib
import matplotlib.pyplot as plt
# Root directory of the project
ROOT_DIR = os.path.abspath("../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize
# Import COCO config
if 'PYTHONPATH' in os.environ:
print("Please unset the environment variable PYTHONPATH if you got errors with pycocotools!")
sys.path.append(os.path.join(ROOT_DIR, "samples/coco/")) # To find local version
import coco
#get_ipython().run_line_magic('matplotlib', 'inline')
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Local path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Download COCO trained weights from Releases if needed
if not os.path.exists(COCO_MODEL_PATH):
utils.download_trained_weights(COCO_MODEL_PATH)
# Directory of images to run detection on
IMAGE_DIR = os.path.join(ROOT_DIR, "images")
# Directory of videos to be saved as detection results
VIDEO_OUTPUT_DIR = os.path.join(ROOT_DIR, "videos")
# ## Configurations
#
# We'll be using a model trained on the MS-COCO dataset. The configurations of this model are in the ```CocoConfig``` class in ```coco.py```.
#
# For inferencing, modify the configurations a bit to fit the task. To do so, sub-class the ```CocoConfig``` class and override the attributes you need to change.
class InferenceConfig(coco.CocoConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
if os.getenv('IMAGE_MAX_DIM'):
IMAGE_MAX_DIM = int(os.getenv('IMAGE_MAX_DIM'))
if os.getenv('IMAGE_MIN_DIM'):
IMAGE_MIN_DIM = int(os.getenv('IMAGE_MIN_DIM'))
config = InferenceConfig()
config.display()
# ## Create Model and Load Trained Weights
# Create model object in inference mode.
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
# Load weights trained on MS-COCO
model.load_weights(COCO_MODEL_PATH, by_name=True)
# ## Class Names
#
# The model classifies objects and returns class IDs, which are integer values that identify each class. Some datasets assign integer values to their classes and some don't. For example, in the MS-COCO dataset, the 'person' class is 1 and 'teddy bear' is 88. The IDs are often sequential, but not always. The COCO dataset, for example, has classes associated with class IDs 70 and 72, but not 71.
#
# To improve consistency, and to support training on data from multiple sources at the same time, our ```Dataset``` class assigns its own sequential integer IDs to each class. For example, if you load the COCO dataset using our ```Dataset``` class, the 'person' class would get class ID = 1 (just like COCO) and the 'teddy bear' class is 78 (different from COCO). Keep that in mind when mapping class IDs to class names.
#
# To get the list of class names, you'd load the dataset and then use the ```class_names``` property like this.
# ```
# # Load COCO dataset
# dataset = coco.CocoDataset()
# dataset.load_coco(COCO_DIR, "train")
# dataset.prepare()
#
# # Print class names
# print(dataset.class_names)
# ```
#
# We don't want to require you to download the COCO dataset just to run this demo, so we're including the list of class names below. The index of the class name in the list represents its ID (first class is 0, second is 1, third is 2, ...etc.)
# COCO Class names
# Index of the class in the list is its ID. For example, to get ID of
# the teddy bear class, use: class_names.index('teddy bear')
class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic light',
'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard',
'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',
'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
'teddy bear', 'hair drier', 'toothbrush']
# ## Run Object Detection
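# Illustrative sketch of the usual single-image demo flow (not part of the
# original script): detect objects in one image from IMAGE_DIR and visualize.
image = skimage.io.imread(os.path.join(IMAGE_DIR, random.choice(os.listdir(IMAGE_DIR))))
results = model.detect([image], verbose=1)
r = results[0]
visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
                            class_names, r['scores'])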
from skimage.measure import find_contours
import matplotlib.pyplot as plt
from matplotlib import patches, lines
from matplotlib.patches import Polygon
import time
import cv2  # used by MaskRCNNTracker (cv2.fillPoly)
class MaskRCNNTracker():
"""Implements tracker based segmentation ouputs.
Params:
-
Inputs:
-
Output:
        A dictionary that maps the current frame's instance indexes to
the unique instance IDs that identify individual objects
"""
def __init__(self):
self.instance_id_manager = 0
self.dict_instance_history = {}
self.dict_trajectories = {}
self.instance_memory_length = 2
self.frame_number = 0 # the current frame number
self.image_size = None # the image size (x, y) of the current frame
self.dict_location_prediction = {}
self.dict_trajectory_timestamp = {} # frame number corresponding to the last location
        # if an instance disappears (i.e., no correspondence found), how long do we keep the
# records ?
self.time_keep_records_frames = 50 # in frames
def fill_polygons_in_bounding_map(self, poly_vertices):
"""
        Given one or multiple polygons, each consisting of a sequence of vertices,
determine a box or map that encloses them. Then fill the polygon(s) within
the map and calculate its area.
Input:
- poly_vertices: A list of polygons. Each item is a list of points [x,y]
"""
left = 10000 # sufficiently large coordinate in x
right = 0 # the minimum possible coordinate in x
top = 10000 # sufficiently large coordinate in y
bottom = 0 # the minimum possible coordinate in y
# polyVertices: a list of N-by-2 arrays
for poly in poly_vertices:
left = min(left, np.amin(poly[:,0]))
right = max(right, np.amax(poly[:,0]))
top = min(top, np.amin(poly[:,1]))
bottom = max(bottom, np.amax(poly[:,1]))
pts = []
for poly in poly_vertices:
pts.append(poly-np.array([left,top]))
# This map is a 2-D array
map = np.zeros((bottom-top+1, right-left+1),dtype=np.uint8)
# mask the area
cv2.fillPoly(map, pts, color=(255))
polyArea = np.count_nonzero(map)
return (left, top, right, bottom, map, polyArea, self.frame_number)
def compute_intersection_polygons(self, tuplePolygonA, tuplePolygonB):
"""
Calculate intersection between two regions each outlined by one
or multiple polygons.
Inputs:
- tuplePolygonA, tuplePolygonB: A tuple to represent a region outlined
by one or multiple polygons. See the output of method
"fill_polygons_in_bounding_map".
Return: Intersection over Union (IoU) in the range from 0 to 1.0
"""
# tuplePolygonA and tuplePolygonB
# (xmin, ymin, xmax, ymax, filledPolygon2Dmap, frame_number)
A_left = tuplePolygonA[0]
A_right = tuplePolygonA[2]
A_top = tuplePolygonA[1]
A_bottom = tuplePolygonA[3]
B_left = tuplePolygonB[0]
B_right = tuplePolygonB[2]
B_top = tuplePolygonB[1]
B_bottom = tuplePolygonB[3]
# check if the two maps intersect
if B_left >= A_right or B_top >= A_bottom:
return 0
if A_left >= B_right or A_top >= B_bottom:
return 0
# calculate the overlapping part of the two bounding maps
Overlap_left = max(A_left, B_left)
Overlap_right = min(A_right, B_right)
Overlap_top = max(A_top, B_top)
Overlap_bottom = min(A_bottom, B_bottom)
# get the overlapping part within the two maps respectively
Overlap_A_map = tuplePolygonA[4][(Overlap_top-A_top):(min(A_bottom,Overlap_bottom)-A_top+1),
(Overlap_left-A_left):(min(A_right,Overlap_right)-A_left+1)]
Overlap_B_map = tuplePolygonB[4][(Overlap_top-B_top):(min(B_bottom,Overlap_bottom)-B_top+1),
(Overlap_left-B_left):(min(B_right,Overlap_right)-B_left+1)]
# calculate the intersection between the two silhouettes within the overlapping part
Overlap_map_boolean = np.logical_and(Overlap_A_map, Overlap_B_map)
# calculate the area of silhouette intersection
Overlap_count = np.count_nonzero(Overlap_map_boolean)
Union_count = tuplePolygonA[5] + tuplePolygonB[5] - Overlap_count
return Overlap_count/Union_count
def update_buffers(self):
# Update the buffers (dictionaries) for the past detection results
for uid in self.dict_trajectories:
if (len(self.dict_trajectories[uid]) > 80):
self.dict_trajectories[uid].pop(0)
uid_list = list(self.dict_trajectories.keys())
for uid in uid_list:
if (self.frame_number - self.dict_trajectory_timestamp[uid]) > self.time_keep_records_frames:
self.dict_trajectories.pop(uid)
self.dict_trajectory_timestamp.pop(uid)
def receive_first_segmentation_output(self, results, class_names, image_size):
"""
        This method is called when the segmentation results for the very first frame are received
Input:
- results: segmentation results as output of Mask R-CNN
- class_names: list of class names of the dataset
- image_size: image size in format (x, y)
Output:
- Tuple:
            item 0: dict mapping the current frame's instance indexes to assigned unique IDs
            item 1: dict of contours for the current instances
            item 2: dict of bounding-box centers for the current instances
"""
boxes = results['rois']
masks = results['masks']
class_ids = results['class_ids']
scores = results['scores']
self.image_size = image_size
# Number of instances
N = boxes.shape[0]
if not N:
return None
else:
assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]
# increment the frame counter
self.frame_number = 1
# Find the instances of interest, e.g., persons
instances_of_interest = []
for i in range(N):
class_id = class_ids[i]
if class_id == class_names.index('person') and scores[i] >= 0.75:
instances_of_interest.append(i)
# Find the contours that cover detected instances
dict_contours = {}
for i in instances_of_interest:
# Mask
mask = masks[:, :, i]
# Mask Polygon
# Pad to ensure proper polygons for masks that touch image edges.
padded_mask = np.zeros(
(mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
padded_mask[1:-1, 1:-1] = mask
dict_contours[i] = find_contours(padded_mask, 0.5)
# Analyze the contours and calculate the areas
dict_polygons_in_bounding_map = {}
for i in dict_contours:
pts2d = [] # each element is an array of the shape (-1,2)
for c in dict_contours[i]: # the value is a list
pts2d.append(c.astype(np.int32))
dict_polygons_in_bounding_map[i] = self.fill_polygons_in_bounding_map(pts2d)
# Initialize the buffers
dict_inst_index_to_uid = {} # mapping current frame's instance index to unique ID
assert self.instance_id_manager == 0
for i in dict_polygons_in_bounding_map:
self.instance_id_manager += 1
uid = self.instance_id_manager
dict_inst_index_to_uid[i] = uid
self.dict_instance_history[uid] = [dict_polygons_in_bounding_map[i]]
y1, x1, y2, x2 = boxes[i]
self.dict_trajectories[uid] = [[self.frame_number, (x1 + x2)//2, (y1 + y2)//2]]
self.dict_trajectory_timestamp[uid] = self.frame_number
# calculate the center of the box that encloses a instance's contour
dict_box_center = {}
for i in dict_polygons_in_bounding_map:
cy = (dict_polygons_in_bounding_map[i][0] + dict_polygons_in_bounding_map[i][2])//2
cx = (dict_polygons_in_bounding_map[i][1] + dict_polygons_in_bounding_map[i][3])//2
dict_box_center[i] = (cx, cy)
        # predict the locations of identified instances in the next frame
self.dict_location_prediction = {}
for uid in self.dict_trajectories:
self.dict_location_prediction[uid] = self.predict_location(uid)
return (dict_inst_index_to_uid, dict_contours, dict_box_center)
def receive_subsequent_segmentation_output(self, results, class_names, image_size):
"""
Update tracker states upon new detection results
Input:
- results: segmentation results as output of Mask R-CNN
- class_names: list of class names of the dataset
- image_size: image size in format (x, y)
Output:
- Tuple:
            item 0: dict mapping the current frame's instance indexes to assigned unique IDs
            item 1: dict of contours for the current instances
            item 2: dict of bounding-box centers for the current instances
"""
boxes = results['rois']
masks = results['masks']
class_ids = results['class_ids']
scores = results['scores']
self.image_size = image_size
diagonal_line = math.sqrt(image_size[0]*image_size[0] + image_size[1]*image_size[1])
# Number of instances
N = boxes.shape[0]
if not N:
return None
else:
assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]
# increment the frame counter
self.frame_number += 1
# pop up the old data if necessary
self.update_buffers()
# Find the instances of interest, e.g., persons
instances_of_interest = []
for i in range(N):
class_id = class_ids[i]
if class_id == class_names.index('person') and scores[i] >= 0.75:
instances_of_interest.append(i)
# Find the contours that cover detected instances
dict_contours = {}
for i in instances_of_interest:
# Mask
mask = masks[:, :, i]
# Mask Polygon
# Pad to ensure proper polygons for masks that touch image edges.
padded_mask = np.zeros(
(mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
padded_mask[1:-1, 1:-1] = mask
dict_contours[i] = find_contours(padded_mask, 0.5)
            # Correspondence between existing instances and the instances in the current frame
dict_inst_index_to_uid = {} # mapping current frame's instance index to unique ID
list_matching_distance = []
for i in instances_of_interest:
y1, x1, y2, x2 = boxes[i]
x0, y0 = (x1 + x2)//2, (y1 + y2)//2
uid_min = 0 # invalid ID
min_dist2 = diagonal_line * diagonal_line
# here "uid" is a unique ID assigned to each detected instance
for uid in self.dict_location_prediction:
xp, yp = self.dict_location_prediction[uid]
dist2 = (x0 - xp) * (x0 - xp) + (y0 - yp) * (y0 - yp)
if dist2 < min_dist2:
uid_min = uid
min_dist2 = dist2
min_dist = math.sqrt(min_dist2)
#print("uid", uid_min, "min_dist:", min_dist)
if min_dist/diagonal_line < 0.02: # used a ratio threshold independent of image size
list_matching_distance.append((i, uid_min, min_dist))
list_matching_distance.sort(key=lambda item: item[2], reverse=False) # in ascending order
uid_set = set(self.dict_location_prediction.keys())
for e in list_matching_distance: # e is a tuple
i = e[0] # the instance ID in the current frame
uid = e[1] # unique existing instance ID
dist = e[2]
if uid in uid_set:
uid_set.remove(uid) # this unique ID is claimed and won't be taken by other instances
dict_inst_index_to_uid[i] = uid
            # What if the instances do not match any of the existing identified instances?
for i in instances_of_interest:
if i not in dict_inst_index_to_uid: # this would be a new instance
self.instance_id_manager += 1
uid = self.instance_id_manager
dict_inst_index_to_uid[i] = uid
            # calculate the center of the box that encloses an instance's contour
dict_box_center = {}
for i in instances_of_interest:
y1, x1, y2, x2 = boxes[i]
x0, y0 = (x1 + x2)//2, (y1 + y2)//2
dict_box_center[i] = (x0, y0)
for i in dict_inst_index_to_uid:
y1, x1, y2, x2 = boxes[i]
uid = dict_inst_index_to_uid[i]
if uid not in self.dict_trajectories:
self.dict_trajectories[uid] = [[self.frame_number, (x1 + x2)//2, (y1 + y2)//2]]
else:
self.dict_trajectories[uid].append([self.frame_number, (x1 + x2)//2, (y1 + y2)//2])
self.dict_trajectory_timestamp[uid] = self.frame_number
            # predict the locations of identified instances in the next frame
self.dict_location_prediction = {}
for uid in self.dict_trajectories:
self.dict_location_prediction[uid] = self.predict_location(uid)
return (dict_inst_index_to_uid, dict_contours, dict_box_center)
def receive_segmentation_output(self, results, class_names, image_size):
"""
Update tracker states upon new detection results
Input:
- results: segmentation results as output of Mask R-CNN
- class_names: list of class names of the dataset
- image_size: image size in format (x, y)
Output:
- Tuple:
            item 0: mapping from current-frame instance index to assigned unique ID (dict)
            item 1: contours for current-frame instances (dict)
            item 2: bounding-box centers for current-frame instances (dict)
"""
if self.instance_id_manager == 0:
return self.receive_first_segmentation_output(results, class_names, image_size)
else:
return self.receive_subsequent_segmentation_output(results, class_names, image_size)
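    # A minimal usage sketch (assumption, not part of the original class):
    # `model`, `class_names` and `frames` come from the surrounding Mask R-CNN
    # pipeline, and the tracker class name is hypothetical here; only
    # receive_segmentation_output() is defined in this file.
    #
    #   tracker = InstanceTracker()
    #   for frame in frames:
    #       results = model.detect([frame], verbose=0)[0]
    #       out = tracker.receive_segmentation_output(
    #           results, class_names, (frame.shape[1], frame.shape[0]))
    #       if out is not None:
    #           inst_to_uid, contours, centers = out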
def save_trajectory_to_textfile(self, uid, fname):
"""
Dump a specified instance's location trajectory to a text file
Input:
- uid: Unique instance ID
- fname: out filename
"""
if uid in self.dict_trajectories:
outfile = open(str(fname) + "_%04d"%(uid) + ".txt", "w")
for u in self.dict_trajectories[uid]:
outfile.write(str(u[0])+"\t"+str(u[1])+"\t"+str(u[2])+"\n")
outfile.close()
def estimate_velocity(self, uid):
"""
Return estimated velocity
"""
if uid not in self.dict_trajectories:
return None
pos = np.array(self.dict_trajectories[uid])
m = pos.shape[0] # the number of points (memory for the past images)
if m < 2: # single point
return (0, 0)
# partition the set of points
x0, y0 = pos[0:m//2, 1].mean(), pos[0:m//2, 2].mean()
x1, y1 = pos[m//2:, 1].mean(), pos[m//2:, 2].mean()
        timespan = np.amax([1.0, (pos[-1, 0] - pos[0, 0])/2])
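        # Assumed completion (not in the original source): the velocity is the
        # displacement between the two half-trajectory centroids divided by the
        # elapsed time, returned as (vx, vy) in pixels per frame.
        return ((x1 - x0) / timespan, (y1 - y0) / timespan)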
#!/usr/bin/env python36
# -*- coding: utf-8 -*-
"""
Created on 2018/10/6 9:33 AM
@author: Tangrizzly
"""
from __future__ import print_function
import time
import numpy as np
from numpy.random import uniform
import theano
import theano.tensor as T
from theano.tensor.nnet import sigmoid
from theano.tensor import exp
from theano.tensor.shared_randomstreams import RandomStreams
# from theano.tensor.nnet.nnet import softmax  # operates on a 2d matrix, row-wise.
from theano.tensor.extra_ops import Unique
__docformat__ = 'restructedtext en'
def exe_time(func):
def new_func(*args, **args2):
t0 = time.time()
print("-- @%s, {%s} start" % (time.strftime("%X", time.localtime()), func.__name__))
back = func(*args, **args2)
print("-- @%s, {%s} end" % (time.strftime("%X", time.localtime()), func.__name__))
print("-- @%.3fs taken for {%s}" % (time.time() - t0, func.__name__))
return back
return new_func
def softmax(x):
    # Take the softmax along the vertical (column) direction.
    # Theano's built-in softmax works on a 2d matrix row-wise. One step inside the scan
    # in this model handles a (n,) vector, which raises an error if used directly,
    # so the softmax is re-implemented here.
    # Processing a (n,) vector along axis=0 is much more convenient.
e_x = exp(x - x.max(axis=0, keepdims=True))
out = e_x / e_x.sum(axis=0, keepdims=True)
return out
# 'Obo' is one by one: train on one sample at a time.
# ======================================================================================================================
class GeoIE:
def __init__(self, train, test, alpha_lambda, n_user, n_item, n_in, n_hidden, ulptai):
        # Adapted from the dAE section of the official Theano tutorials.
rng = np.random.RandomState(123)
self.n_hidden = n_hidden
        self.thea_rng = RandomStreams(rng.randint(2 ** 30))  # its random functions can run on the GPU.
        # train/test sequences padded with masks
self.ulptai = ulptai
tra_buys_masks, tra_buys_neg_masks, tra_count, tra_masks = train
tes_buys_masks, tes_buys_neg_masks = test
self.tra_masks = theano.shared(borrow=True, value=np.asarray(tra_masks, dtype='int32'))
self.tra_count = theano.shared(borrow=True, value=np.asarray(tra_count, dtype='int32'))
self.tra_buys_masks = theano.shared(borrow=True, value=np.asarray(tra_buys_masks, dtype='int32'))
self.tes_buys_masks = theano.shared(borrow=True, value=np.asarray(tes_buys_masks, dtype='int32'))
self.tra_buys_neg_masks = theano.shared(borrow=True, value=np.asarray(tra_buys_neg_masks, dtype='int32'))
        self.tes_buys_neg_masks = theano.shared(borrow=True, value=np.asarray(tes_buys_neg_masks, dtype='int32'))
import sys
from numpy.testing import *
import numpy.core.umath as ncu
import numpy as np
class _FilterInvalids(object):
def setUp(self):
self.olderr = np.seterr(invalid='ignore')
def tearDown(self):
np.seterr(**self.olderr)
class TestDivision(TestCase):
def test_division_int(self):
# int division should follow Python
x = np.array([5, 10, 90, 100, -5, -10, -90, -100, -120])
if 5 / 10 == 0.5:
assert_equal(x / 100, [0.05, 0.1, 0.9, 1,
-0.05, -0.1, -0.9, -1, -1.2])
else:
assert_equal(x / 100, [0, 0, 0, 1, -1, -1, -1, -1, -2])
assert_equal(x // 100, [0, 0, 0, 1, -1, -1, -1, -1, -2])
assert_equal(x % 100, [5, 10, 90, 0, 95, 90, 10, 0, 80])
def test_division_complex(self):
# check that implementation is correct
msg = "Complex division implementation check"
x = np.array([1. + 1.*1j, 1. + .5*1j, 1. + 2.*1j], dtype=np.complex128)
assert_almost_equal(x**2/x, x, err_msg=msg)
# check overflow, underflow
msg = "Complex division overflow/underflow check"
x = np.array([1.e+110, 1.e-110], dtype=np.complex128)
y = x**2/x
assert_almost_equal(y/x, [1, 1], err_msg=msg)
def test_zero_division_complex(self):
err = np.seterr(invalid="ignore", divide="ignore")
try:
x = np.array([0.0], dtype=np.complex128)
y = 1.0/x
assert_(np.isinf(y)[0])
y = complex(np.inf, np.nan)/x
assert_(np.isinf(y)[0])
y = complex(np.nan, np.inf)/x
assert_(np.isinf(y)[0])
y = complex(np.inf, np.inf)/x
assert_(np.isinf(y)[0])
y = 0.0/x
assert_(np.isnan(y)[0])
finally:
np.seterr(**err)
def test_floor_division_complex(self):
# check that implementation is correct
msg = "Complex floor division implementation check"
x = np.array([.9 + 1j, -.1 + 1j, .9 + .5*1j, .9 + 2.*1j], dtype=np.complex128)
y = np.array([0., -1., 0., 0.], dtype=np.complex128)
assert_equal(np.floor_divide(x**2,x), y, err_msg=msg)
# check overflow, underflow
msg = "Complex floor division overflow/underflow check"
x = np.array([1.e+110, 1.e-110], dtype=np.complex128)
y = np.floor_divide(x**2, x)
assert_equal(y, [1.e+110, 0], err_msg=msg)
class TestPower(TestCase):
def test_power_float(self):
x = np.array([1., 2., 3.])
assert_equal(x**0, [1., 1., 1.])
assert_equal(x**1, x)
assert_equal(x**2, [1., 4., 9.])
y = x.copy()
y **= 2
assert_equal(y, [1., 4., 9.])
assert_almost_equal(x**(-1), [1., 0.5, 1./3])
assert_almost_equal(x**(0.5), [1., ncu.sqrt(2), ncu.sqrt(3)])
def test_power_complex(self):
x = np.array([1+2j, 2+3j, 3+4j])
assert_equal(x**0, [1., 1., 1.])
assert_equal(x**1, x)
assert_almost_equal(x**2, [-3+4j, -5+12j, -7+24j])
assert_almost_equal(x**3, [(1+2j)**3, (2+3j)**3, (3+4j)**3])
assert_almost_equal(x**4, [(1+2j)**4, (2+3j)**4, (3+4j)**4])
assert_almost_equal(x**(-1), [1/(1+2j), 1/(2+3j), 1/(3+4j)])
assert_almost_equal(x**(-2), [1/(1+2j)**2, 1/(2+3j)**2, 1/(3+4j)**2])
assert_almost_equal(x**(-3), [(-11+2j)/125, (-46-9j)/2197,
(-117-44j)/15625])
assert_almost_equal(x**(0.5), [ncu.sqrt(1+2j), ncu.sqrt(2+3j),
ncu.sqrt(3+4j)])
norm = 1./((x**14)[0])
assert_almost_equal(x**14 * norm,
[i * norm for i in [-76443+16124j, 23161315+58317492j,
5583548873 + 2465133864j]])
# Ticket #836
def assert_complex_equal(x, y):
assert_array_equal(x.real, y.real)
assert_array_equal(x.imag, y.imag)
for z in [complex(0, np.inf), complex(1, np.inf)]:
err = np.seterr(invalid="ignore")
z = np.array([z], dtype=np.complex_)
try:
assert_complex_equal(z**1, z)
assert_complex_equal(z**2, z*z)
assert_complex_equal(z**3, z*z*z)
finally:
np.seterr(**err)
def test_power_zero(self):
# ticket #1271
zero = np.array([0j])
one = np.array([1+0j])
cinf = np.array([complex(np.inf, 0)])
cnan = np.array([complex(np.nan, np.nan)])
def assert_complex_equal(x, y):
x, y = np.asarray(x), np.asarray(y)
assert_array_equal(x.real, y.real)
assert_array_equal(x.imag, y.imag)
# positive powers
for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:
assert_complex_equal(np.power(zero, p), zero)
# zero power
assert_complex_equal(np.power(zero, 0), one)
assert_complex_equal(np.power(zero, 0+1j), cnan)
# negative power
for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:
assert_complex_equal(np.power(zero, -p), cnan)
assert_complex_equal(np.power(zero, -1+0.2j), cnan)
def test_fast_power(self):
x=np.array([1,2,3], np.int16)
assert (x**2.00001).dtype is (x**2.0).dtype
class TestLog2(TestCase):
def test_log2_values(self) :
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f','d','g'] :
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)
assert_almost_equal(np.log2(xf), yf)
class TestExp2(TestCase):
def test_exp2_values(self) :
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f','d','g'] :
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)
assert_almost_equal(np.exp2(yf), xf)
class TestLogAddExp2(_FilterInvalids):
# Need test for intermediate precisions
def test_logaddexp2_values(self) :
x = [1, 2, 3, 4, 5]
y = [5, 4, 3, 2, 1]
z = [6, 6, 6, 6, 6]
for dt, dec in zip(['f','d','g'],[6, 15, 15]) :
xf = np.log2(np.array(x, dtype=dt))
yf = np.log2(np.array(y, dtype=dt))
zf = np.log2(np.array(z, dtype=dt))
assert_almost_equal(np.logaddexp2(xf, yf), zf, decimal=dec)
def test_logaddexp2_range(self) :
x = [1000000, -1000000, 1000200, -1000200]
y = [1000200, -1000200, 1000000, -1000000]
z = [1000200, -1000000, 1000200, -1000000]
for dt in ['f','d','g'] :
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_almost_equal(np.logaddexp2(logxf, logyf), logzf)
def test_inf(self) :
err = np.seterr(invalid='ignore')
inf = np.inf
x = [inf, -inf, inf, -inf, inf, 1, -inf, 1]
y = [inf, inf, -inf, -inf, 1, inf, 1, -inf]
z = [inf, inf, inf, -inf, inf, inf, 1, 1]
try:
for dt in ['f','d','g'] :
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_equal(np.logaddexp2(logxf, logyf), logzf)
finally:
np.seterr(**err)
def test_nan(self):
assert_(np.isnan(np.logaddexp2(np.nan, np.inf)))
assert_(np.isnan(np.logaddexp2(np.inf, np.nan)))
assert_(np.isnan(np.logaddexp2(np.nan, 0)))
assert_(np.isnan(np.logaddexp2(0, np.nan)))
assert_(np.isnan(np.logaddexp2(np.nan, np.nan)))
class TestLog(TestCase):
def test_log_values(self) :
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f','d','g'] :
log2_ = 0.69314718055994530943
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)*log2_
assert_almost_equal(np.log(xf), yf)
class TestExp(TestCase):
def test_exp_values(self) :
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f','d','g'] :
log2_ = 0.69314718055994530943
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)*log2_
assert_almost_equal(np.exp(yf), xf)
class TestLogAddExp(_FilterInvalids):
def test_logaddexp_values(self) :
x = [1, 2, 3, 4, 5]
y = [5, 4, 3, 2, 1]
z = [6, 6, 6, 6, 6]
for dt, dec in zip(['f','d','g'],[6, 15, 15]) :
xf = np.log(np.array(x, dtype=dt))
yf = np.log(np.array(y, dtype=dt))
zf = np.log(np.array(z, dtype=dt))
assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec)
def test_logaddexp_range(self) :
x = [1000000, -1000000, 1000200, -1000200]
y = [1000200, -1000200, 1000000, -1000000]
z = [1000200, -1000000, 1000200, -1000000]
for dt in ['f','d','g'] :
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_almost_equal(np.logaddexp(logxf, logyf), logzf)
def test_inf(self) :
err = np.seterr(invalid='ignore')
inf = np.inf
x = [inf, -inf, inf, -inf, inf, 1, -inf, 1]
y = [inf, inf, -inf, -inf, 1, inf, 1, -inf]
z = [inf, inf, inf, -inf, inf, inf, 1, 1]
try:
for dt in ['f','d','g'] :
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_equal(np.logaddexp(logxf, logyf), logzf)
finally:
np.seterr(**err)
def test_nan(self):
assert_(np.isnan(np.logaddexp(np.nan, np.inf)))
assert_(np.isnan(np.logaddexp(np.inf, np.nan)))
assert_(np.isnan(np.logaddexp(np.nan, 0)))
assert_(np.isnan(np.logaddexp(0, np.nan)))
assert_(np.isnan(np.logaddexp(np.nan, np.nan)))
class TestLog1p(TestCase):
def test_log1p(self):
assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2))
assert_almost_equal(ncu.log1p(1e-6), ncu.log(1+1e-6))
class TestExpm1(TestCase):
def test_expm1(self):
assert_almost_equal(ncu.expm1(0.2), ncu.exp(0.2)-1)
assert_almost_equal(ncu.expm1(1e-6), ncu.exp(1e-6)-1)
class TestHypot(TestCase, object):
def test_simple(self):
assert_almost_equal(ncu.hypot(1, 1), ncu.sqrt(2))
assert_almost_equal(ncu.hypot(0, 0), 0)
def assert_hypot_isnan(x, y):
err = np.seterr(invalid='ignore')
try:
assert_(np.isnan(ncu.hypot(x, y)), "hypot(%s, %s) is %s, not nan" % (x, y, ncu.hypot(x, y)))
finally:
np.seterr(**err)
def assert_hypot_isinf(x, y):
err = np.seterr(invalid='ignore')
try:
assert_(np.isinf(ncu.hypot(x, y)), "hypot(%s, %s) is %s, not inf" % (x, y, ncu.hypot(x, y)))
finally:
np.seterr(**err)
class TestHypotSpecialValues(TestCase):
def test_nan_outputs(self):
assert_hypot_isnan(np.nan, np.nan)
assert_hypot_isnan(np.nan, 1)
    def test_nan_outputs2(self):
assert_hypot_isinf(np.nan, np.inf)
assert_hypot_isinf(np.inf, np.nan)
assert_hypot_isinf(np.inf, 0)
assert_hypot_isinf(0, np.inf)
def assert_arctan2_isnan(x, y):
assert_(np.isnan(ncu.arctan2(x, y)), "arctan(%s, %s) is %s, not nan" % (x, y, ncu.arctan2(x, y)))
def assert_arctan2_ispinf(x, y):
assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) > 0), "arctan(%s, %s) is %s, not +inf" % (x, y, ncu.arctan2(x, y)))
def assert_arctan2_isninf(x, y):
assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) < 0), "arctan(%s, %s) is %s, not -inf" % (x, y, ncu.arctan2(x, y)))
def assert_arctan2_ispzero(x, y):
assert_((ncu.arctan2(x, y) == 0 and not np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not +0" % (x, y, ncu.arctan2(x, y)))
def assert_arctan2_isnzero(x, y):
assert_((ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not -0" % (x, y, ncu.arctan2(x, y)))
class TestArctan2SpecialValues(TestCase):
def test_one_one(self):
# atan2(1, 1) returns pi/4.
assert_almost_equal(ncu.arctan2(1, 1), 0.25 * np.pi)
assert_almost_equal(ncu.arctan2(-1, 1), -0.25 * np.pi)
assert_almost_equal(ncu.arctan2(1, -1), 0.75 * np.pi)
def test_zero_nzero(self):
# atan2(+-0, -0) returns +-pi.
assert_almost_equal(ncu.arctan2(np.PZERO, np.NZERO), np.pi)
assert_almost_equal(ncu.arctan2(np.NZERO, np.NZERO), -np.pi)
def test_zero_pzero(self):
# atan2(+-0, +0) returns +-0.
assert_arctan2_ispzero(np.PZERO, np.PZERO)
assert_arctan2_isnzero(np.NZERO, np.PZERO)
def test_zero_negative(self):
# atan2(+-0, x) returns +-pi for x < 0.
assert_almost_equal(ncu.arctan2(np.PZERO, -1), np.pi)
assert_almost_equal(ncu.arctan2(np.NZERO, -1), -np.pi)
def test_zero_positive(self):
# atan2(+-0, x) returns +-0 for x > 0.
assert_arctan2_ispzero(np.PZERO, 1)
assert_arctan2_isnzero(np.NZERO, 1)
def test_positive_zero(self):
# atan2(y, +-0) returns +pi/2 for y > 0.
assert_almost_equal(ncu.arctan2(1, np.PZERO), 0.5 * np.pi)
assert_almost_equal(ncu.arctan2(1, np.NZERO), 0.5 * np.pi)
def test_negative_zero(self):
# atan2(y, +-0) returns -pi/2 for y < 0.
assert_almost_equal(ncu.arctan2(-1, np.PZERO), -0.5 * np.pi)
assert_almost_equal(ncu.arctan2(-1, np.NZERO), -0.5 * np.pi)
def test_any_ninf(self):
# atan2(+-y, -infinity) returns +-pi for finite y > 0.
assert_almost_equal(ncu.arctan2(1, np.NINF), np.pi)
assert_almost_equal(ncu.arctan2(-1, np.NINF), -np.pi)
def test_any_pinf(self):
# atan2(+-y, +infinity) returns +-0 for finite y > 0.
assert_arctan2_ispzero(1, np.inf)
assert_arctan2_isnzero(-1, np.inf)
def test_inf_any(self):
# atan2(+-infinity, x) returns +-pi/2 for finite x.
assert_almost_equal(ncu.arctan2( np.inf, 1), 0.5 * np.pi)
assert_almost_equal(ncu.arctan2(-np.inf, 1), -0.5 * np.pi)
def test_inf_ninf(self):
# atan2(+-infinity, -infinity) returns +-3*pi/4.
assert_almost_equal(ncu.arctan2( np.inf, -np.inf), 0.75 * np.pi)
assert_almost_equal(ncu.arctan2(-np.inf, -np.inf), -0.75 * np.pi)
def test_inf_pinf(self):
# atan2(+-infinity, +infinity) returns +-pi/4.
assert_almost_equal(ncu.arctan2( np.inf, np.inf), 0.25 * np.pi)
assert_almost_equal(ncu.arctan2(-np.inf, np.inf), -0.25 * np.pi)
def test_nan_any(self):
# atan2(nan, x) returns nan for any x, including inf
assert_arctan2_isnan(np.nan, np.inf)
assert_arctan2_isnan(np.inf, np.nan)
assert_arctan2_isnan(np.nan, np.nan)
class TestLdexp(TestCase):
def _check_ldexp(self, tp):
assert_almost_equal(ncu.ldexp(np.array(2., np.float32),
np.array(3, tp)), 16.)
assert_almost_equal(ncu.ldexp(np.array(2., np.float64),
np.array(3, tp)), 16.)
assert_almost_equal(ncu.ldexp(np.array(2., np.longdouble),
np.array(3, tp)), 16.)
def test_ldexp(self):
# The default Python int type should work
assert_almost_equal(ncu.ldexp(2., 3), 16.)
# The following int types should all be accepted
self._check_ldexp(np.int8)
self._check_ldexp(np.int16)
self._check_ldexp(np.int32)
self._check_ldexp('i')
self._check_ldexp('l')
@dec.knownfailureif(sys.platform == 'win32' and sys.version_info < (2, 6),
"python.org < 2.6 binaries have broken ldexp in the "
"C runtime")
def test_ldexp_overflow(self):
# silence warning emitted on overflow
err = np.seterr(over="ignore")
try:
imax = np.iinfo(np.dtype('l')).max
imin = np.iinfo(np.dtype('l')).min
assert_equal(ncu.ldexp(2., imax), np.inf)
assert_equal(ncu.ldexp(2., imin), 0)
finally:
np.seterr(**err)
class TestMaximum(_FilterInvalids):
def test_reduce(self):
dflt = np.typecodes['AllFloat']
dint = np.typecodes['AllInteger']
seq1 = np.arange(11)
seq2 = seq1[::-1]
func = np.maximum.reduce
for dt in dint:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 10)
assert_equal(func(tmp2), 10)
for dt in dflt:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 10)
assert_equal(func(tmp2), 10)
tmp1[::2] = np.nan
tmp2[::2] = np.nan
assert_equal(func(tmp1), np.nan)
assert_equal(func(tmp2), np.nan)
def test_reduce_complex(self):
assert_equal(np.maximum.reduce([1,2j]),1)
assert_equal(np.maximum.reduce([1+3j,2j]),1+3j)
def test_float_nans(self):
nan = np.nan
arg1 = np.array([0, nan, nan])
arg2 = np.array([nan, 0, nan])
out = np.array([nan, nan, nan])
assert_equal(np.maximum(arg1, arg2), out)
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)] :
arg1 = np.array([0, cnan, cnan], dtype=np.complex)
arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
out = np.array([nan, nan, nan], dtype=np.complex)
assert_equal(np.maximum(arg1, arg2), out)
def test_object_array(self):
arg1 = np.arange(5, dtype=np.object)
arg2 = arg1 + 1
assert_equal(np.maximum(arg1, arg2), arg2)
class TestMinimum(_FilterInvalids):
def test_reduce(self):
dflt = np.typecodes['AllFloat']
dint = np.typecodes['AllInteger']
seq1 = np.arange(11)
seq2 = seq1[::-1]
func = np.minimum.reduce
for dt in dint:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 0)
assert_equal(func(tmp2), 0)
for dt in dflt:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 0)
assert_equal(func(tmp2), 0)
tmp1[::2] = np.nan
tmp2[::2] = np.nan
assert_equal(func(tmp1), np.nan)
assert_equal(func(tmp2), np.nan)
def test_reduce_complex(self):
assert_equal(np.minimum.reduce([1,2j]),2j)
assert_equal(np.minimum.reduce([1+3j,2j]),2j)
def test_float_nans(self):
nan = np.nan
arg1 = np.array([0, nan, nan])
arg2 = np.array([nan, 0, nan])
out = np.array([nan, nan, nan])
assert_equal(np.minimum(arg1, arg2), out)
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)] :
arg1 = np.array([0, cnan, cnan], dtype=np.complex)
arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
out = np.array([nan, nan, nan], dtype=np.complex)
assert_equal(np.minimum(arg1, arg2), out)
def test_object_array(self):
arg1 = np.arange(5, dtype=np.object)
arg2 = arg1 + 1
assert_equal(np.minimum(arg1, arg2), arg1)
class TestFmax(_FilterInvalids):
def test_reduce(self):
dflt = np.typecodes['AllFloat']
dint = np.typecodes['AllInteger']
seq1 = np.arange(11)
seq2 = seq1[::-1]
func = np.fmax.reduce
for dt in dint:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 10)
assert_equal(func(tmp2), 10)
for dt in dflt:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 10)
assert_equal(func(tmp2), 10)
tmp1[::2] = np.nan
tmp2[::2] = np.nan
assert_equal(func(tmp1), 9)
assert_equal(func(tmp2), 9)
def test_reduce_complex(self):
assert_equal(np.fmax.reduce([1,2j]),1)
assert_equal(np.fmax.reduce([1+3j,2j]),1+3j)
def test_float_nans(self):
nan = np.nan
arg1 = np.array([0, nan, nan])
arg2 = np.array([nan, 0, nan])
out = np.array([0, 0, nan])
assert_equal(np.fmax(arg1, arg2), out)
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)] :
arg1 = np.array([0, cnan, cnan], dtype=np.complex)
arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
out = np.array([0, 0, nan], dtype=np.complex)
assert_equal(np.fmax(arg1, arg2), out)
class TestFmin(_FilterInvalids):
def test_reduce(self):
dflt = np.typecodes['AllFloat']
dint = np.typecodes['AllInteger']
seq1 = np.arange(11)
seq2 = seq1[::-1]
func = np.fmin.reduce
for dt in dint:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 0)
assert_equal(func(tmp2), 0)
for dt in dflt:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 0)
assert_equal(func(tmp2), 0)
tmp1[::2] = np.nan
tmp2[::2] = np.nan
assert_equal(func(tmp1), 1)
assert_equal(func(tmp2), 1)
def test_reduce_complex(self):
assert_equal(np.fmin.reduce([1,2j]),2j)
assert_equal(np.fmin.reduce([1+3j,2j]),2j)
def test_float_nans(self):
nan = np.nan
arg1 = np.array([0, nan, nan])
arg2 = np.array([nan, 0, nan])
out = np.array([0, 0, nan])
assert_equal(np.fmin(arg1, arg2), out)
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)] :
arg1 = np.array([0, cnan, cnan], dtype=np.complex)
arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
out = np.array([0, 0, nan], dtype=np.complex)
assert_equal(np.fmin(arg1, arg2), out)
class TestFloatingPoint(TestCase):
def test_floating_point(self):
assert_equal(ncu.FLOATING_POINT_SUPPORT, 1)
class TestDegrees(TestCase):
def test_degrees(self):
assert_almost_equal(ncu.degrees(np.pi), 180.0)
assert_almost_equal(ncu.degrees(-0.5*np.pi), -90.0)
class TestRadians(TestCase):
def test_radians(self):
assert_almost_equal(ncu.radians(180.0), np.pi)
assert_almost_equal(ncu.radians(-90.0), -0.5*np.pi)
class TestSign(TestCase):
def test_sign(self):
a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0])
out = np.zeros(a.shape)
tgt = np.array([1., -1., np.nan, 0.0, 1.0, -1.0])
olderr = np.seterr(invalid='ignore')
try:
res = ncu.sign(a)
assert_equal(res, tgt)
res = ncu.sign(a, out)
assert_equal(res, tgt)
assert_equal(out, tgt)
finally:
np.seterr(**olderr)
class TestSpecialMethods(TestCase):
def test_wrap(self):
class with_wrap(object):
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context):
r = with_wrap()
r.arr = arr
r.context = context
return r
a = with_wrap()
x = ncu.minimum(a, a)
assert_equal(x.arr, np.zeros(1))
func, args, i = x.context
self.assertTrue(func is ncu.minimum)
self.assertEqual(len(args), 2)
assert_equal(args[0], a)
assert_equal(args[1], a)
self.assertEqual(i, 0)
def test_wrap_with_iterable(self):
# test fix for bug #1026:
class with_wrap(np.ndarray):
__array_priority__ = 10
def __new__(cls):
return np.asarray(1).view(cls).copy()
def __array_wrap__(self, arr, context):
return arr.view(type(self))
a = with_wrap()
x = ncu.multiply(a, (1, 2, 3))
self.assertTrue(isinstance(x, with_wrap))
assert_array_equal(x, np.array((1, 2, 3)))
def test_priority_with_scalar(self):
# test fix for bug #826:
class A(np.ndarray):
__array_priority__ = 10
def __new__(cls):
return np.asarray(1.0, 'float64').view(cls).copy()
a = A()
x = np.float64(1)*a
self.assertTrue(isinstance(x, A))
assert_array_equal(x, np.array(1))
def test_old_wrap(self):
class with_wrap(object):
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr):
r = with_wrap()
r.arr = arr
return r
a = with_wrap()
x = ncu.minimum(a, a)
assert_equal(x.arr, np.zeros(1))
def test_priority(self):
class A(object):
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context):
r = type(self)()
r.arr = arr
r.context = context
return r
class B(A):
__array_priority__ = 20.
class C(A):
__array_priority__ = 40.
x = np.zeros(1)
a = A()
b = B()
c = C()
f = ncu.minimum
self.assertTrue(type(f(x,x)) is np.ndarray)
self.assertTrue(type(f(x,a)) is A)
self.assertTrue(type(f(x,b)) is B)
self.assertTrue(type(f(x,c)) is C)
self.assertTrue(type(f(a,x)) is A)
self.assertTrue(type(f(b,x)) is B)
self.assertTrue(type(f(c,x)) is C)
self.assertTrue(type(f(a,a)) is A)
self.assertTrue(type(f(a,b)) is B)
self.assertTrue(type(f(b,a)) is B)
self.assertTrue(type(f(b,b)) is B)
self.assertTrue(type(f(b,c)) is C)
self.assertTrue(type(f(c,b)) is C)
self.assertTrue(type(f(c,c)) is C)
        self.assertTrue(type(ncu.exp(a)) is A)
        self.assertTrue(type(ncu.exp(b)) is B)
        self.assertTrue(type(ncu.exp(c)) is C)
def test_failing_wrap(self):
class A(object):
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context):
raise RuntimeError
a = A()
self.assertRaises(RuntimeError, ncu.maximum, a, a)
def test_default_prepare(self):
class with_wrap(object):
__array_priority__ = 10
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context):
return arr
a = with_wrap()
x = ncu.minimum(a, a)
assert_equal(x, np.zeros(1))
assert_equal(type(x), np.ndarray)
def test_prepare(self):
class with_prepare(np.ndarray):
__array_priority__ = 10
def __array_prepare__(self, arr, context):
# make sure we can return a new
return np.array(arr).view(type=with_prepare)
a = np.array(1).view(type=with_prepare)
x = np.add(a, a)
assert_equal(x, np.array(2))
assert_equal(type(x), with_prepare)
def test_failing_prepare(self):
class A(object):
def __array__(self):
return np.zeros(1)
def __array_prepare__(self, arr, context=None):
raise RuntimeError
a = A()
self.assertRaises(RuntimeError, ncu.maximum, a, a)
def test_array_with_context(self):
class A(object):
def __array__(self, dtype=None, context=None):
func, args, i = context
self.func = func
self.args = args
self.i = i
return np.zeros(1)
class B(object):
def __array__(self, dtype=None):
return np.zeros(1, dtype)
class C(object):
def __array__(self):
return np.zeros(1)
a = A()
ncu.maximum(np.zeros(1), a)
self.assertTrue(a.func is ncu.maximum)
assert_equal(a.args[0], 0)
self.assertTrue(a.args[1] is a)
self.assertTrue(a.i == 1)
assert_equal(ncu.maximum(a, B()), 0)
assert_equal(ncu.maximum(a, C()), 0)
class TestChoose(TestCase):
def test_mixed(self):
c = np.array([True,True])
a = np.array([True,True])
assert_equal(np.choose(c, (a, 1)), np.array([1,1]))
def is_longdouble_finfo_bogus():
info = np.finfo(np.longcomplex)
return not np.isfinite(np.log10(info.tiny/info.eps))
class TestComplexFunctions(object):
funcs = [np.arcsin, np.arccos, np.arctan, np.arcsinh, np.arccosh,
np.arctanh, np.sin, np.cos, np.tan, np.exp,
np.exp2, np.log, np.sqrt, np.log10, np.log2,
np.log1p]
def test_it(self):
for f in self.funcs:
if f is np.arccosh :
x = 1.5
else :
x = .5
fr = f(x)
fz = f(np.complex(x))
assert_almost_equal(fz.real, fr, err_msg='real part %s'%f)
assert_almost_equal(fz.imag, 0., err_msg='imag part %s'%f)
def test_precisions_consistent(self) :
z = 1 + 1j
for f in self.funcs :
fcf = f(np.csingle(z))
fcd = f(np.cdouble(z))
fcl = f(np.clongdouble(z))
assert_almost_equal(fcf, fcd, decimal=6, err_msg='fch-fcd %s'%f)
assert_almost_equal(fcl, fcd, decimal=15, err_msg='fch-fcl %s'%f)
def test_branch_cuts(self):
# check branch cuts and continuity on them
yield _check_branch_cut, np.log, -0.5, 1j, 1, -1
yield _check_branch_cut, np.log2, -0.5, 1j, 1, -1
yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1
yield _check_branch_cut, np.log1p, -1.5, 1j, 1, -1
yield _check_branch_cut, np.sqrt, -0.5, 1j, 1, -1
yield _check_branch_cut, np.arcsin, [ -2, 2], [1j, -1j], 1, -1
yield _check_branch_cut, np.arccos, [ -2, 2], [1j, -1j], 1, -1
yield _check_branch_cut, np.arctan, [-2j, 2j], [1, -1 ], -1, 1
yield _check_branch_cut, np.arcsinh, [-2j, 2j], [-1, 1], -1, 1
yield _check_branch_cut, np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1
yield _check_branch_cut, np.arctanh, [ -2, 2], [1j, -1j], 1, -1
# check against bogus branch cuts: assert continuity between quadrants
yield _check_branch_cut, np.arcsin, [-2j, 2j], [ 1, 1], 1, 1
yield _check_branch_cut, np.arccos, [-2j, 2j], [ 1, 1], 1, 1
yield _check_branch_cut, np.arctan, [ -2, 2], [1j, 1j], 1, 1
yield _check_branch_cut, np.arcsinh, [ -2, 2, 0], [1j, 1j, 1 ], 1, 1
yield _check_branch_cut, np.arccosh, [-2j, 2j, 2], [1, 1, 1j], 1, 1
yield _check_branch_cut, np.arctanh, [-2j, 2j, 0], [1, 1, 1j], 1, 1
@dec.knownfailureif(True, "These branch cuts are known to fail")
def test_branch_cuts_failing(self):
# XXX: signed zero not OK with ICC on 64-bit platform for log, see
# http://permalink.gmane.org/gmane.comp.python.numeric.general/25335
yield _check_branch_cut, np.log, -0.5, 1j, 1, -1, True
yield _check_branch_cut, np.log2, -0.5, 1j, 1, -1, True
yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1, True
yield _check_branch_cut, np.log1p, -1.5, 1j, 1, -1, True
# XXX: signed zeros are not OK for sqrt or for the arc* functions
yield _check_branch_cut, np.sqrt, -0.5, 1j, 1, -1, True
yield _check_branch_cut, np.arcsin, [ -2, 2], [1j, -1j], 1, -1, True
yield _check_branch_cut, np.arccos, [ -2, 2], [1j, -1j], 1, -1, True
yield _check_branch_cut, np.arctan, [-2j, 2j], [1, -1 ], -1, 1, True
yield _check_branch_cut, np.arcsinh, [-2j, 2j], [-1, 1], -1, 1, True
yield _check_branch_cut, np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True
yield _check_branch_cut, np.arctanh, [ -2, 2], [1j, -1j], 1, -1, True
def test_against_cmath(self):
import cmath, sys
# cmath.asinh is broken in some versions of Python, see
# http://bugs.python.org/issue1381
broken_cmath_asinh = False
if sys.version_info < (2,6):
broken_cmath_asinh = True
points = [-1-1j, -1+1j, +1-1j, +1+1j]
name_map = {'arcsin': 'asin', 'arccos': 'acos', 'arctan': 'atan',
'arcsinh': 'asinh', 'arccosh': 'acosh', 'arctanh': 'atanh'}
atol = 4*np.finfo(np.complex).eps
for func in self.funcs:
fname = func.__name__.split('.')[-1]
cname = name_map.get(fname, fname)
try:
cfunc = getattr(cmath, cname)
except AttributeError:
continue
for p in points:
a = complex(func(np.complex_(p)))
b = cfunc(p)
if cname == 'asinh' and broken_cmath_asinh:
continue
assert_(abs(a - b) < atol, "%s %s: %s; cmath: %s"%(fname,p,a,b))
def check_loss_of_precision(self, dtype):
"""Check loss of precision in complex arc* functions"""
# Check against known-good functions
info = np.finfo(dtype)
real_dtype = dtype(0.).real.dtype
eps = info.eps
def check(x, rtol):
x = x.astype(real_dtype)
z = x.astype(dtype)
d = np.absolute(np.arcsinh(x)/np.arcsinh(z).real - 1)
            assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)]))
import os
import random
import torch
import torch.backends.cudnn as cudnn
import numpy as np
import scipy.sparse as sp
from itertools import chain
from sklearn.neighbors import NearestNeighbors, KNeighborsClassifier
def init_random_seed(manual_seed):
seed = None
if manual_seed is None:
seed = random.randint(1,10000)
else:
seed = manual_seed
print("use random seed: {}".format(seed))
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
def init_model(net, device, restore):
    if restore is not None and os.path.exists(restore):
net.load_state_dict(torch.load(restore))
net.restored = True
print("Restore model from: {}".format(os.path.abspath(restore)))
else:
pass
if torch.cuda.is_available():
cudnn.benchmark =True
net.to(device)
return net
def save_model(net, model_root, filename):
if not os.path.exists(model_root):
os.makedirs(model_root)
torch.save(net.state_dict(), os.path.join(model_root, filename))
print("save pretrained model to: {}".format(os.path.join(model_root, filename)))
#-||x_i-x_j||^2
# def neg_squared_euc_dists(X):
# sum_X = np.sum(np.square(X), 1)
# D = np.add(np.add(-2 * np.dot(X, X.T), sum_X).T, sum_X)
# return -D
# input: -||x_i - x_j||^2 / (2 * sigma^2); compute a row-wise softmax
def softmax(D, diag_zero=True):
# e_x = np.exp(D)
    e_x = np.exp(D - np.max(D, axis=1).reshape([-1, 1]))
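    # Assumed completion (not in the original source): zero the diagonal so a
    # point never counts as its own neighbour, add a tiny constant for
    # numerical stability, and normalise each row to sum to one.
    if diag_zero:
        np.fill_diagonal(e_x, 0.)
    e_x = e_x + 1e-8
    return e_x / e_x.sum(axis=1).reshape([-1, 1])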
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import print_function, division, absolute_import
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.dates import num2epoch, epoch2num
from astropy.time import Time
from matplotlib.dates import (YearLocator, MonthLocator, DayLocator,
HourLocator, MinuteLocator, SecondLocator,
DateFormatter, epoch2num)
from matplotlib.ticker import FixedLocator, FixedFormatter
MIN_TSTART_UNIX = Time('1999:100', format='yday').unix
MAX_TSTOP_UNIX = Time(Time.now()).unix + 1e7
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Provide useful utilities for matplotlib."""
# Default tick locator and format specification for making nice time axes
TICKLOCS = ((YearLocator, {'base': 5}, '%Y', YearLocator, {'base': 1}),
(YearLocator, {'base': 4}, '%Y', YearLocator, {'base': 1}),
(YearLocator, {'base': 2}, '%Y', YearLocator, {'base': 1}),
(YearLocator, {'base': 1}, '%Y', MonthLocator, {'bymonth': (1, 4, 7, 10)}),
(MonthLocator, {'bymonth': list(range(1, 13, 6))}, '%Y-%b', MonthLocator, {}),
(MonthLocator, {'bymonth': list(range(1, 13, 4))}, '%Y-%b', MonthLocator, {}),
(MonthLocator, {'bymonth': list(range(1, 13, 3))}, '%Y-%b', MonthLocator, {}),
(MonthLocator, {'bymonth': list(range(1, 13, 2))}, '%Y-%b', MonthLocator, {}),
(MonthLocator, {}, '%Y-%b', DayLocator, {'bymonthday': (1, 15)}),
(DayLocator, {'interval': 10}, '%Y:%j', DayLocator, {}),
(DayLocator, {'interval': 5}, '%Y:%j', DayLocator, {}),
(DayLocator, {'interval': 4}, '%Y:%j', DayLocator, {}),
(DayLocator, {'interval': 2}, '%Y:%j', DayLocator, {}),
(DayLocator, {'interval': 1}, '%Y:%j', HourLocator, {'byhour': (0, 6, 12, 18)}),
(HourLocator, {'byhour': list(range(0, 24, 12))}, '%j:%H:00', HourLocator, {}),
(HourLocator, {'byhour': list(range(0, 24, 6))}, '%j:%H:00', HourLocator, {}),
(HourLocator, {'byhour': list(range(0, 24, 4))}, '%j:%H:00', HourLocator, {}),
(HourLocator, {'byhour': list(range(0, 24, 2))}, '%j:%H:00', HourLocator, {}),
(HourLocator, {}, '%j:%H:00', MinuteLocator, {'byminute': (0, 15, 30, 45)}),
(MinuteLocator, {'byminute': (0, 30)}, '%j:%H:%M', MinuteLocator, {'byminute': list(range(0,60,5))}),
(MinuteLocator, {'byminute': (0, 15, 30, 45)}, '%j:%H:%M', MinuteLocator, {'byminute': list(range(0,60,5))}),
(MinuteLocator, {'byminute': list(range(0, 60, 10))}, '%j:%H:%M', MinuteLocator, {}),
(MinuteLocator, {'byminute': list(range(0, 60, 5))}, '%j:%H:%M', MinuteLocator, {}),
(MinuteLocator, {'byminute': list(range(0, 60, 4))}, '%j:%H:%M', MinuteLocator, {}),
(MinuteLocator, {'byminute': list(range(0, 60, 2))}, '%j:%H:%M', MinuteLocator, {}),
(MinuteLocator, {}, '%j:%H:%M', SecondLocator, {'bysecond': (0, 15, 30, 45)}),
(SecondLocator, {'bysecond': (0, 30)}, '%H:%M:%S', SecondLocator, {'bysecond': list(range(0,60,5))}),
(SecondLocator, {'bysecond': (0, 15, 30, 45)}, '%H:%M:%S', SecondLocator, {'bysecond': list(range(0,60,5))}),
(SecondLocator, {'bysecond': list(range(0, 60, 10))}, '%H:%M:%S', SecondLocator, {}),
(SecondLocator, {'bysecond': list(range(0, 60, 5))}, '%H:%M:%S', SecondLocator, {}),
(SecondLocator, {'bysecond': list(range(0, 60, 4))}, '%H:%M:%S', SecondLocator, {}),
(SecondLocator, {'bysecond': list(range(0, 60, 2))}, '%H:%M:%S', SecondLocator, {}),
(SecondLocator, {}, '%H:%M:%S', SecondLocator, {}),
)
def set_time_ticks(plt, ticklocs=None):
"""
Pick nice values to show time ticks in a date plot.
Example::
x = cxctime2plotdate(np.linspace(0, 3e7, 20))
y = np.random.normal(size=len(x))
fig = pylab.figure()
plt = fig.add_subplot(1, 1, 1)
plt.plot_date(x, y, fmt='b-')
ticklocs = set_time_ticks(plt)
fig.autofmt_xdate()
fig.show()
The returned value of ``ticklocs`` can be used in subsequent date plots to
force the same major and minor tick locations and formatting. Note also
the use of the high-level fig.autofmt_xdate() convenience method to configure
vertically stacked date plot(s) to be well-formatted.
:param plt: ``matplotlib.axes.AxesSubplot`` object (from ``pylab.figure.add_subplot``)
:param ticklocs: list of major/minor tick locators ala the default ``TICKLOCS``
:rtype: tuple with selected ticklocs as first element
"""
locs = ticklocs or TICKLOCS
for majorLoc, major_kwargs, major_fmt, minorLoc, minor_kwargs in locs:
plt.xaxis.set_major_locator(majorLoc(**major_kwargs))
plt.xaxis.set_minor_locator(minorLoc(**minor_kwargs))
plt.xaxis.set_major_formatter(DateFormatter(major_fmt))
majorticklocs = plt.xaxis.get_ticklocs()
if len(majorticklocs) >= 5:
break
return ((majorLoc, major_kwargs, major_fmt, minorLoc, minor_kwargs), )
def remake_ticks(ax):
"""Remake the date ticks for the current plot if space is pressed. If '0'
is pressed then set the date ticks to the maximum possible range.
"""
ticklocs = set_time_ticks(ax)
ax.figure.canvas.draw()
def plot_cxctime(times, y, fmt='-b', fig=None, ax=None, yerr=None, xerr=None, tz=None,
state_codes=None, interactive=True, **kwargs):
"""Make a date plot where the X-axis values are in a CXC time compatible format. If no ``fig``
value is supplied then the current figure will be used (and created
automatically if needed). If yerr or xerr is supplied, ``errorbar()`` will
be called and any additional keyword arguments will be passed to it.
Otherwise any additional keyword arguments (e.g. ``fmt='b-'``) are passed
through to the ``plot()`` function. Also see ``errorbar()`` for an
explanation of the possible forms of *yerr*/*xerr*.
If the ``state_codes`` keyword argument is provided then the y-axis ticks and
tick labels will be set accordingly. The ``state_codes`` value must be a list
of (raw_count, state_code) tuples, and is normally set to ``msid.state_codes``
for an MSID object from fetch().
If the ``interactive`` keyword is True (default) then the plot will be redrawn
at the end and a GUI callback will be created which allows for on-the-fly
update of the date tick labels when panning and zooming interactively. Set
this to False to improve the speed when making several plots. This will likely
require issuing a plt.draw() or fig.canvas.draw() command at the end.
:param times: CXC time values for x-axis (DateTime compatible format, CxoTime)
:param y: y values
:param fmt: plot format (default = '-b')
:param fig: pyplot figure object (optional)
:param yerr: error on y values, may be [ scalar | N, Nx1, or 2xN array-like ]
:param xerr: error on x values in units of DAYS (may be [ scalar | N, Nx1, or 2xN array-like ] )
:param tz: timezone string
:param state_codes: list of (raw_count, state_code) tuples
:param interactive: use plot interactively (default=True, faster if False)
:param ``**kwargs``: keyword args passed through to ``plot_date()`` or ``errorbar()``
:rtype: ticklocs, fig, ax = tick locations, figure, and axes object.
"""
from matplotlib import pyplot
if fig is None:
fig = pyplot.gcf()
if ax is None:
ax = fig.gca()
if yerr is not None or xerr is not None:
ax.errorbar(time2plotdate(times), y, yerr=yerr, xerr=xerr, fmt=fmt, **kwargs)
ax.xaxis_date(tz)
else:
ax.plot_date(time2plotdate(times), y, fmt=fmt, **kwargs)
ticklocs = set_time_ticks(ax)
fig.autofmt_xdate()
if state_codes is not None:
counts, codes = zip(*state_codes)
ax.yaxis.set_major_locator(FixedLocator(counts))
ax.yaxis.set_major_formatter(FixedFormatter(codes))
# If plotting interactively then show the figure and enable interactive resizing
if interactive and hasattr(fig, 'show'):
fig.canvas.draw()
ax.callbacks.connect('xlim_changed', remake_ticks)
return ticklocs, fig, ax
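# A minimal usage sketch (assumption, not from the original module):
#
#   import numpy as np
#   times = np.linspace(MIN_TSTART_UNIX, MIN_TSTART_UNIX + 3e7, 100)
#   y = np.random.normal(size=len(times))
#   ticklocs, fig, ax = plot_cxctime(times, y, fmt='-b')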
def time2plotdate(times):
"""
Convert input CXC time (sec) to the time base required for the matplotlib
plot_date function (days since start of year 1)?
:param times: times (any DateTime compatible format or object)
:rtype: plot_date times
"""
# # Convert times to float array of CXC seconds
# if isinstance(times, (Time, Time)):
# times = times.unix
# else:
times = np.asarray(times)
# If not floating point then use CxoTime to convert to seconds
# if times.dtype.kind != 'f':
# times = Time(times).unix
# Find the plotdate of first time and use a relative offset from there
t0 = Time(times[0], format='unix').unix
plotdate0 = epoch2num(t0)
return (times - times[0]) / 86400. + plotdate0
def pointpair(x, y=None):
"""Interleave and then flatten two arrays ``x`` and ``y``. This is
typically useful for making a histogram style plot where ``x`` and ``y``
are the bin start and stop respectively. If no value for ``y`` is provided then
``x`` is used.
Example::
from Ska.Matplotlib import pointpair
x = np.arange(1, 100, 5)
x0 = x[:-1]
x1 = x[1:]
y = np.random.uniform(len(x0))
xpp = pointpair(x0, x1)
ypp = pointpair(y)
plot(xpp, ypp)
:x: left edge value of point pairs
:y: right edge value of point pairs (optional)
:rtype: np.array of length 2*len(x) == 2*len(y)
"""
if y is None:
y = x
return np.array([x, y]).reshape(-1, order='F')
def hist_outline(dataIn, *args, **kwargs):
"""
histOutline from http://www.scipy.org/Cookbook/Matplotlib/UnfilledHistograms
Make a histogram that can be plotted with plot() so that
the histogram just has the outline rather than bars as it
usually does.
Example Usage:
binsIn = np.arange(0, 1, 0.1)
angle = pylab.rand(50)
(bins, data) = histOutline(binsIn, angle)
plot(bins, data, 'k-', linewidth=2)
"""
(histIn, binsIn) = np.histogram(dataIn, *args, **kwargs)
stepSize = binsIn[1] - binsIn[0]
bins = np.zeros(len(binsIn)*2 + 2, dtype=np.float)
data = np.zeros(len(binsIn)*2 + 2, dtype=np.float)
for bb in range(len(binsIn)):
bins[2*bb + 1] = binsIn[bb]
bins[2*bb + 2] = binsIn[bb] + stepSize
if bb < len(histIn):
data[2*bb + 1] = histIn[bb]
data[2*bb + 2] = histIn[bb]
bins[0] = bins[1]
bins[-1] = bins[-2]
data[0] = 0
data[-1] = 0
return (bins, data)
def get_stat(t0, t1, npix):
t0 = Time(t0)
t1 = Time(t1)
dt_days = t1 - t0
if dt_days > npix:
stat = 'daily'
elif dt_days * (24 * 60 / 5) > npix:
stat = '5min'
else:
stat = None
return stat
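# Worked example (assumption, not from the original module): for a 30-day span
# drawn across 1000 pixels, 30 < 1000 but 30 * (24 * 60 / 5) = 8640 > 1000, so
# get_stat returns '5min'; a 5-year (~1826-day) span exceeds 1000 pixels and
# returns 'daily'; spans short enough to fit at full resolution return None.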
class MsidPlot(object):
"""Make an interactive plot for exploring the MSID data.
This method opens a new plot figure (or clears the current figure)
and plots the MSID ``vals`` versus ``times``. This plot can be
panned or zoomed arbitrarily and the data values will be fetched
from the archive as needed. Depending on the time scale, ``iplot``
will display either full resolution, 5-minute, or daily values.
For 5-minute and daily values the min and max values are also
plotted.
Once the plot is displayed and the window is selected by clicking in
it, the plot limits can be controlled by the usual methods (window
selection, pan / zoom). In addition following key commands are
recognized::
a: autoscale for full data range in x and y
m: toggle plotting of min/max values
p: pan at cursor x
y: toggle autoscaling of y-axis
z: zoom at cursor x
?: print help
Example::
dat = fetch.Msid('aoattqt1', '2011:001', '2012:001', stat='5min')
iplot = Ska.engarchive.MsidPlot(dat)
Caveat: the ``MsidPlot()`` class is not meant for use within scripts, and
may give unexpected results if used in combination with other plotting
commands directed at the same plot figure.
:param msid: MSID object
:param fmt: plot format for values (default="-b")
:param fmt_minmax: plot format for mins and maxes (default="-c")
:param plot_kwargs: additional plotting keyword args
"""
def __init__(self, msid, fmt='-b', fmt_minmax='-c', **plot_kwargs):
self.fig = plt.gcf()
self.fig.clf()
self.ax = self.fig.gca()
self.zoom = 4.0
self.msid = msid
self.fetch = msid.fetch
self.fmt = fmt
self.fmt_minmax = fmt_minmax
self.plot_kwargs = plot_kwargs
self.msidname = self.msid.msid
self.plot_mins = True
self.tstart = self.msid.times[0]
self.tstop = self.msid.times[-1]
self.scaley = True
# Make sure MSID is sampled at the correct density for initial plot
stat = get_stat(self.tstart, self.tstop, self.npix)
if stat != self.msid.stat:
self.msid = self.fetch.Msid(self.msidname, self.tstart, self.tstop,
stat=stat)
self.ax.set_autoscale_on(True)
self.draw_plot()
self.ax.set_autoscale_on(False)
plt.grid()
self.fig.canvas.mpl_connect('key_press_event', self.key_press)
@property
def npix(self):
dims = self.ax.axesPatch.get_window_extent().bounds
return int(dims[2] + 0.5)
def key_press(self, event):
if event.key in ['z', 'p'] and event.inaxes:
x0, x1 = self.ax.get_xlim()
dx = x1 - x0
xc = event.xdata
zoom = self.zoom if event.key == 'p' else 1.0 / self.zoom
new_x1 = zoom * (x1 - xc) + xc
new_x0 = new_x1 - zoom * dx
tstart = max(num2epoch(new_x0), MIN_TSTART_UNIX)
tstop = min(num2epoch(new_x1), MAX_TSTOP_UNIX)
new_x0 = epoch2num(tstart)
new_x1 = epoch2num(tstop)
self.ax.set_xlim(new_x0, new_x1)
self.ax.figure.canvas.draw_idle()
elif event.key == 'm':
for _ in range(len(self.ax.lines)):
self.ax.lines.pop()
self.plot_mins = not self.plot_mins
print('\nPlotting mins and maxes is {}'.format(
'enabled' if self.plot_mins else 'disabled'))
self.draw_plot()
elif event.key == 'a':
# self.fig.clf()
# self.ax = self.fig.gca()
self.ax.set_autoscale_on(True)
self.draw_plot()
self.ax.set_autoscale_on(False)
self.xlim_changed(None)
elif event.key == 'y':
self.scaley = not self.scaley
print('Autoscaling y axis is {}'.format(
'enabled' if self.scaley else 'disabled'))
self.draw_plot()
elif event.key == '?':
print("""
Interactive MSID plot keys:
a: autoscale for full data range in x and y
m: toggle plotting of min/max values
p: pan at cursor x
y: toggle autoscaling of y-axis
z: zoom at cursor x
?: print help
""")
def xlim_changed(self, event):
x0, x1 = self.ax.get_xlim()
self.tstart = Time(num2epoch(x0), format='unix').unix
self.tstop = Time(num2epoch(x1), format='unix').unix
stat = get_stat(self.tstart, self.tstop, self.npix)
if (self.tstart < self.msid.tstart or
self.tstop > self.msid.tstop or
stat != self.msid.stat):
dt = self.tstop - self.tstart
self.tstart -= dt / 4
self.tstop += dt / 4
self.msid = self.fetch.Msid(self.msidname, self.tstart, self.tstop,
stat=stat)
self.draw_plot()
def draw_plot(self):
msid = self.msid
for _ in range(len(self.ax.lines)):
self.ax.lines.pop()
# Force manual y scaling
scaley = self.scaley
if scaley:
ymin = None
ymax = None
ok = ((msid.times >= self.tstart) &
(msid.times <= self.tstop))
try:
self.ax.callbacks.disconnect(self.xlim_callback)
except AttributeError:
pass
if self.plot_mins and hasattr(self.msid, 'mins'):
plot_cxctime(msid.times, msid.mins, self.fmt_minmax,
ax=self.ax, fig=self.fig, **self.plot_kwargs)
plot_cxctime(msid.times, msid.maxes, self.fmt_minmax,
ax=self.ax, fig=self.fig, **self.plot_kwargs)
if scaley:
                ymin = np.min(msid.mins[ok])
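                # Assumed completion (not in the original source): track the
                # matching upper limit for manual y scaling.
                ymax = np.max(msid.maxes[ok])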
import base64
import json
import os
import os.path as osp
import numpy as np
import PIL.Image
from labelme import utils
# Written against labelme 3.16.7; other versions may raise errors.
# If needed, use the labelme version that matches the JSON files.
if __name__ == '__main__':
    count = os.listdir("./before")  # folder containing the original JSON files
    jpgs_path = "jpg"  # output folder for the generated JPGs
    pngs_path = "png"  # output folder for the generated PNGs
    classes = ["_background_", "kongpao"]  # edit to match your own annotation classes
    # sort numerically (1, 2, 3, ...) instead of lexically (1, 10, 100, ...)
count.sort(key=lambda x: int(x.split('.')[0]))
if not os.path.exists(jpgs_path):
os.mkdir(jpgs_path)
if not os.path.exists(pngs_path):
os.mkdir(pngs_path)
for i in range(0, len(count)):
        path = os.path.join("./before", count[i])  # adjust to your directory layout
if os.path.isfile(path) and path.endswith('json'):
data = json.load(open(path))
if data['imageData']:
imageData = data['imageData']
else:
imagePath = os.path.join(os.path.dirname(path), data['imagePath'])
with open(imagePath, 'rb') as f:
imageData = f.read()
imageData = base64.b64encode(imageData).decode('utf-8')
img = utils.img_b64_to_arr(imageData)
label_name_to_value = {'_background_': 0}
for shape in data['shapes']:
label_name = shape['label']
if label_name in label_name_to_value:
label_value = label_name_to_value[label_name]
else:
label_value = len(label_name_to_value)
label_name_to_value[label_name] = label_value
# label_values must be dense
label_values, label_names = [], []
for ln, lv in sorted(label_name_to_value.items(), key=lambda x: x[1]):
label_values.append(lv)
label_names.append(ln)
assert label_values == list(range(len(label_values)))
lbl = utils.shapes_to_label(img.shape, data['shapes'], label_name_to_value)
PIL.Image.fromarray(img).save(osp.join(jpgs_path, count[i].split(".")[0]+'.jpg'))
            new = np.zeros([np.shape(img)[0], np.shape(img)[1]])
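            # Assumed completion (not in the original script): map each labelme
            # label index onto the global class index and save the result as a
            # paletted PNG alongside the JPEG.
            for name in label_names:
                index_json = label_names.index(name)
                index_all = classes.index(name)
                new = new + index_all * (np.array(lbl) == index_json)
            utils.lblsave(osp.join(pngs_path, count[i].split(".")[0] + '.png'), new)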
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.platform import googletest
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
class IpuXlaConvTest(xla_test.XLATestCase):
def testReductionMeanDim12(self):
with self.session() as sess:
with ops.device("/device:IPU:0"):
pa = array_ops.placeholder(np.float32, [2, 7, 7, 32], name="a")
output = math_ops.reduce_mean(pa, axis=[1, 2])
fd = {pa: np.ones([2, 7, 7, 32])}
result = sess.run(output, fd)
self.assertAllClose(result, np.ones([2, 32]))
def testReductionMeanDim03(self):
with self.session() as sess:
with ops.device("/device:IPU:0"):
pa = array_ops.placeholder(np.float32, [2, 7, 7, 32], name="a")
output = math_ops.reduce_mean(pa, axis=[0, 3])
fd = {pa: np.ones([2, 7, 7, 32])}
result = sess.run(output, fd)
self.assertAllClose(result, np.ones([7, 7]))
def testReductionMeanDim13(self):
with self.session() as sess:
with ops.device("/device:IPU:0"):
pa = array_ops.placeholder(np.float32, [2, 7, 7, 32], name="a")
output = math_ops.reduce_mean(pa, axis=[1, 3])
fd = {pa: np.ones([2, 7, 7, 32])}
result = sess.run(output, fd)
self.assertAllClose(result, np.ones([2, 7]))
def testReductionMeanDim23(self):
with self.session() as sess:
with ops.device("/device:IPU:0"):
pa = array_ops.placeholder(np.float32, [2, 7, 7, 32], name="a")
output = math_ops.reduce_mean(pa, axis=[2, 3])
fd = {pa: np.ones([2, 7, 7, 32])}
result = sess.run(output, fd)
self.assertAllClose(result, np.ones([2, 7]))
def testAvgPoolSamePaddingWithStridesF32(self):
with self.session() as sess:
with ops.device("/device:IPU:0"):
pa = array_ops.placeholder(np.float32, [1, 1, 10, 10], name="a")
output = nn.avg_pool(pa,
ksize=[1, 1, 5, 5],
strides=[1, 1, 2, 2],
data_format='NCHW',
padding='SAME',
name="avg")
fd = {pa: np.ones([1, 1, 10, 10])}
result = sess.run(output, fd)
self.assertAllClose(result, np.ones([1, 1, 5, 5]))
def testAvgPoolSamePaddingWithStridesF16(self):
with self.session() as sess:
with ops.device("/device:IPU:0"):
pa = array_ops.placeholder(np.float16, [1, 1, 10, 10], name="a")
output = nn.avg_pool(pa,
ksize=[1, 1, 5, 5],
strides=[1, 1, 2, 2],
data_format='NCHW',
padding='SAME')
fd = {pa: np.ones([1, 1, 10, 10])}
result = sess.run(output, fd)
self.assertAllClose(result, np.ones([1, 1, 5, 5]))
def testAvgPoolValidPaddingWithStridesF32(self):
with self.session() as sess:
with ops.device("/device:IPU:0"):
pa = array_ops.placeholder(np.float32, [1, 1, 10, 10], name="a")
output = nn.avg_pool(pa,
ksize=[1, 1, 5, 5],
strides=[1, 1, 2, 2],
data_format='NCHW',
padding='VALID')
fd = {pa: np.ones([1, 1, 10, 10])}
result = sess.run(output, fd)
self.assertAllClose(result, np.ones([1, 1, 3, 3]))
def testAvgPoolValidPaddingWithStridesF16(self):
with self.session() as sess:
with ops.device("/device:IPU:0"):
pa = array_ops.placeholder(np.float16, [1, 1, 10, 10], name="a")
output = nn.avg_pool(pa,
ksize=[1, 1, 5, 5],
strides=[1, 1, 2, 2],
data_format='NCHW',
padding='VALID')
fd = {pa: np.ones([1, 1, 10, 10])}
result = sess.run(output, fd)
self.assertAllClose(result, np.ones([1, 1, 3, 3]))
def testMaxPoolSamePaddingWithStridesF32(self):
with self.session() as sess:
with ops.device("/device:IPU:0"):
pa = array_ops.placeholder(np.float32, [1, 1, 10, 10], name="a")
output = nn.max_pool(pa,
ksize=[1, 1, 5, 5],
strides=[1, 1, 2, 2],
data_format='NCHW',
padding='SAME',
name="max")
fd = {pa: np.ones([1, 1, 10, 10])}
result = sess.run(output, fd)
self.assertAllClose(result, np.ones([1, 1, 5, 5]))
def testMaxPoolValidPaddingWithStridesF32(self):
with self.session() as sess:
with ops.device("/device:IPU:0"):
pa = array_ops.placeholder(np.float32, [1, 1, 10, 10], name="a")
output = nn.max_pool(pa,
ksize=[1, 1, 5, 5],
strides=[1, 1, 2, 2],
data_format='NCHW',
padding='VALID',
name="max")
fd = {pa: np.ones([1, 1, 10, 10])}
result = sess.run(output, fd)
self.assertAllClose(result, np.ones([1, 1, 3, 3]))
def testAvgPoolSamePaddingWithStridesF32Dim12(self):
with self.session() as sess:
with ops.device("/device:IPU:0"):
pa = array_ops.placeholder(np.float32, [1, 10, 10, 1], name="a")
output = nn.avg_pool(pa,
ksize=[1, 5, 5, 1],
strides=[1, 2, 2, 1],
data_format='NHWC',
padding='SAME',
name="avg")
      fd = {pa: np.ones([1, 10, 10, 1])}
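      # Assumed completion (not in the original test), mirroring the other
      # pooling tests: SAME padding with stride 2 halves the 10x10 spatial
      # dims, and averaging an all-ones input stays all ones.
      result = sess.run(output, fd)
      self.assertAllClose(result, np.ones([1, 5, 5, 1]))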
from collections import OrderedDict
import george
from george import kernels
import lightgbm as lgb
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import pickle
from astropy.cosmology import FlatLambdaCDM
from scipy.optimize import minimize
from sklearn.model_selection import StratifiedKFold
from scipy.signal import find_peaks
from scipy.special import erf
import tqdm
from settings import settings
# Parameters of the dataset
num_passbands = 6
pad = 100
start_mjd = 59580 - pad
end_mjd = 60675 + pad
# Define class labels, galactic vs extragalactic label and weights
classes = [6, 15, 16, 42, 52, 53, 62, 64, 65, 67, 88, 90, 92, 95, 99]
class_weights = {6: 1, 15: 2, 16: 1, 42: 1, 52: 1, 53: 1, 62: 1, 64: 2, 65: 1,
67: 1, 88: 1, 90: 1, 92: 1, 95: 1, 99: 2}
class_galactic = {6: True, 15: False, 16: True, 42: False, 52: False, 53: True,
62: False, 64: False, 65: True, 67: False, 88: False, 90:
False, 92: True, 95: False}
# Reverse engineered cosmology used in sims
cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725)
def find_time_to_fractions(fluxes, fractions, forward=True):
"""Find the time for a lightcurve to decline to a specific fraction of
maximum light.
fractions should be a decreasing list of the fractions of maximum light
that will be found (eg: [0.8, 0.5, 0.2]).
"""
max_time = np.argmax(fluxes)
max_flux = fluxes[max_time]
result = np.ones(len(fractions)) * 99999
frac_idx = 0
# Start at maximum light, and move along the spectrum. Whenever we cross
# one threshold, we add it to the list and keep going. If we hit the end of
# the array without crossing the threshold, we return a large number for
# that time.
offset = 0
while True:
offset += 1
if forward:
new_time = max_time + offset
            if new_time >= fluxes.shape[0]:
break
else:
new_time = max_time - offset
if new_time < 0:
break
test_flux = fluxes[new_time]
while test_flux < max_flux * fractions[frac_idx]:
result[frac_idx] = offset
frac_idx += 1
if frac_idx == len(fractions):
break
if frac_idx == len(fractions):
break
return result
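
# Illustrative usage sketch (added; not part of the original pipeline). The toy flux
# values below are arbitrary and only show what find_time_to_fractions returns: the
# number of time steps needed to fall to 80%, 50% and 20% of peak flux.
def _example_find_time_to_fractions():
    toy_fluxes = np.array([0., 1., 4., 10., 6., 3., 1., 0.5, 0.1])
    return find_time_to_fractions(toy_fluxes, [0.8, 0.5, 0.2])  # -> array([1., 2., 3.])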
def multi_weighted_logloss(y_true, y_preds):
"""
@author olivier https://www.kaggle.com/ogrellier
multi logloss for PLAsTiCC challenge
"""
if y_preds.shape[1] != len(classes):
# No prediction for 99, pretend that it doesn't exist.
use_classes = classes[:-1]
else:
use_classes = classes
y_p = y_preds
    # Transform y_true into dummies
y_ohe = pd.get_dummies(y_true)
# Normalize rows and limit y_preds to 1e-15, 1-1e-15
y_p = np.clip(a=y_p, a_min=1e-15, a_max=1 - 1e-15)
# Transform to log
y_p_log = np.log(y_p)
y_log_ones = np.sum(y_ohe.values * y_p_log, axis=0)
# Get the number of positives for each class
nb_pos = y_ohe.sum(axis=0).values.astype(float)
# Weight average and divide by the number of positives
class_arr = np.array([class_weights[i] for i in use_classes])
y_w = y_log_ones * class_arr / nb_pos
loss = - np.sum(y_w) / np.sum(class_arr)
return loss
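
# Hedged example (added for illustration only): with one training object per class and
# uniform predictions over the 14 training classes, the weighted log-loss reduces to
# log(14). The inputs here are synthetic and not drawn from the real dataset.
def _example_multi_weighted_logloss():
    y_true = pd.Series(classes[:-1])               # one object per class; class 99 has no training data
    y_preds = np.full((len(y_true), 14), 1. / 14)  # uniform probabilities
    return multi_weighted_logloss(y_true, y_preds)  # ~= np.log(14) ~= 2.64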
def lgb_multi_weighted_logloss(y_true, y_preds):
"""Wrapper around multi_weighted_logloss that works with lgbm"""
y_p = y_preds.reshape(y_true.shape[0], len(classes) - 1, order='F')
loss = multi_weighted_logloss(y_true, y_p)
return 'wloss', loss, False
def do_predictions_flatprob(object_ids, features, classifiers):
pred = 0
for classifier in classifiers:
pred += (
classifier.predict_proba(
features, num_iteration=classifier.best_iteration_)
) / len(classifiers)
# Add in flat prediction for class 99. This prediction depends on whether
# the object is galactic or extragalactic.
gal_frac_99 = 0.04
# Weights without 99 included.
weight_gal = sum([class_weights[class_id] for class_id, is_gal in
class_galactic.items() if is_gal])
weight_extgal = sum([class_weights[class_id] for class_id, is_gal in
class_galactic.items() if not is_gal])
guess_99_gal = gal_frac_99 * class_weights[99] / weight_gal
guess_99_extgal = (1 - gal_frac_99) * class_weights[99] / weight_extgal
is_gals = features['hostgal_photoz'] == 0.
pred_99 = np.array([guess_99_gal if is_gal else guess_99_extgal for is_gal
in is_gals])
stack_pred = np.hstack([pred, pred_99[:, None]])
# Normalize
stack_pred = stack_pred / np.sum(stack_pred, axis=1)[:, None]
# Build a pandas dataframe with the result
df = pd.DataFrame(index=object_ids, data=stack_pred,
columns=['class_%d' % i for i in classes])
return df
def do_predictions(object_ids, features, classifiers, gal_outlier_score=0.25,
extgal_outlier_score=1.4):
print("OLD!!! DON'T USE!")
is_gal = features['hostgal_photoz'] == 0.
base_class_99_scores = np.zeros((len(features), 1))
base_class_99_scores[is_gal] = gal_outlier_score
base_class_99_scores[~is_gal] = extgal_outlier_score
pred = 0
for classifier in classifiers:
# Get base scores
raw_scores = classifier.predict_proba(
features, raw_score=True, num_iteration=classifier.best_iteration_
)
max_scores = np.max(raw_scores, axis=1)[:, None]
class_99_scores = np.clip(base_class_99_scores, None,
max_scores)
# Add in class 99 scores.
scores = np.hstack([raw_scores, class_99_scores])
# Turn the scores into a prediction
iter_pred = np.exp(scores) / np.sum(np.exp(scores), axis=1)[:, None]
pred += iter_pred / len(classifiers)
# Build a pandas dataframe with the result
df = pd.DataFrame(index=object_ids, data=pred,
columns=['class_%d' % i for i in classes])
return df
def do_scores(object_ids, features, classifiers):
scores = []
for classifier in classifiers:
scores.append(classifier.predict_proba(
features, raw_score=True,
num_iteration=classifier.best_iteration_))
scores = np.array(scores)
return scores
def convert_scores(meta, scores, gal_outlier_score=0.4,
extgal_outlier_score=1.4):
is_gal = meta['hostgal_photoz'] == 0.
base_class_99_scores = np.zeros((len(meta), 1))
base_class_99_scores[is_gal] = gal_outlier_score
base_class_99_scores[~is_gal] = extgal_outlier_score
pred = 0
for iter_scores in scores:
# Iterate over each classifier's scores if there were more than one.
# Get base scores
# max_scores = np.max(iter_scores, axis=1)[:, None]
max_scores = np.percentile(iter_scores, 100 * 12.5/13, axis=1)[:, None]
class_99_scores = np.clip(base_class_99_scores, None, max_scores)
# Add in class 99 scores.
iter_full_scores = np.hstack([iter_scores, class_99_scores])
# Turn the scores into a prediction
iter_pred = np.exp(iter_full_scores) / np.sum(np.exp(iter_full_scores),
axis=1)[:, None]
pred += iter_pred / len(scores)
print("Mean gal 99: %.5f" % np.mean(pred[is_gal, -1]))
print("Mean ext 99: %.5f" % np.mean(pred[~is_gal, -1]))
# Build a pandas dataframe with the result
df = pd.DataFrame(index=meta['object_id'], data=pred,
columns=['class_%d' % i for i in classes])
return df
def convert_scores_2(meta, scores, s2n, gal_outlier_score=-2.,
extgal_outlier_score=-0.8):
is_gal = meta['hostgal_photoz'] == 0.
base_class_99_scores = np.zeros((len(meta), 1))
base_class_99_scores[is_gal] = gal_outlier_score
base_class_99_scores[~is_gal] = extgal_outlier_score
base_class_99_scores[:, 0] += 1.5*np.log10(s2n)
pred = 0
for iter_scores in scores:
# Iterate over each classifier's scores if there were more than one.
# Get base scores
# max_scores = np.max(iter_scores, axis=1)[:, None]
max_scores = np.percentile(iter_scores, 100 * 12.5/13, axis=1)[:, None]
class_99_scores = np.clip(base_class_99_scores, None, max_scores)
# Add in class 99 scores.
iter_full_scores = np.hstack([iter_scores, class_99_scores])
# Turn the scores into a prediction
iter_pred = np.exp(iter_full_scores) / np.sum(np.exp(iter_full_scores),
axis=1)[:, None]
pred += iter_pred / len(scores)
print("Mean gal 99: %.5f" % np.mean(pred[is_gal, -1]))
print("Mean ext 99: %.5f" % np.mean(pred[~is_gal, -1]))
# Build a pandas dataframe with the result
df = pd.DataFrame(index=meta['object_id'], data=pred,
columns=['class_%d' % i for i in classes])
return df
def fit_classifier(train_x, train_y, train_weights, eval_x=None, eval_y=None,
eval_weights=None, **kwargs):
lgb_params = {
'boosting_type': 'gbdt',
'objective': 'multiclass',
'num_class': 14,
'metric': 'multi_logloss',
'learning_rate': 0.05,
# 'bagging_fraction': .75,
# 'bagging_freq': 5,
'colsample_bytree': .5,
'reg_alpha': 0.,
'reg_lambda': 0.,
'min_split_gain': 10.,
'min_child_weight': 2000.,
'n_estimators': 5000,
'silent': -1,
'verbose': -1,
'max_depth': 7,
'num_leaves': 50,
}
lgb_params.update(kwargs)
fit_params = {
'verbose': 100,
'sample_weight': train_weights,
}
if eval_x is not None:
fit_params['eval_set'] = [(eval_x, eval_y)]
fit_params['eval_metric'] = lgb_multi_weighted_logloss
fit_params['early_stopping_rounds'] = 50
fit_params['eval_sample_weight'] = [eval_weights]
classifier = lgb.LGBMClassifier(**lgb_params)
classifier.fit(train_x, train_y, **fit_params)
return classifier
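
# Minimal usage sketch (added): fit the classifier on a small random feature matrix.
# The feature values and the reduced n_estimators are purely illustrative; real training
# happens on the extracted features with the fold structure defined elsewhere.
def _example_fit_classifier():
    rng = np.random.RandomState(0)
    toy_x = pd.DataFrame(rng.normal(size=(280, 5)),
                         columns=['feat_%d' % i for i in range(5)])
    toy_y = np.repeat(classes[:-1], 20)  # 14 classes, 20 objects each
    weights = np.ones(len(toy_y))
    return fit_classifier(toy_x, toy_y, weights, n_estimators=10)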
class Dataset(object):
def __init__(self):
"""Class to represent part of the PLAsTiCC dataset.
This class can load either the training or validation data, can produce
features and then can create outputs. The features can also be loaded
from a file to avoid having to recalculate them every time. Not
everything has to be loaded at once, but some functions might not work
if that is the case. I haven't put in the effort to make everything
safe with regards to random calls, so if something breaks you probably
just need to load the data that it needs.
"""
self.flux_data = None
self.meta_data = None
self.features = None
self.dataset_name = None
# Update this whenever the feature calculation code is updated.
self._features_version = settings['FEATURES_VERSION']
# Update this whenever the augmentation code is updated.
self._augment_version = settings['AUGMENT_VERSION']
def load_training_data(self):
"""Load the training dataset."""
self.flux_data = pd.read_csv(settings['RAW_TRAINING_PATH'])
self.meta_data = pd.read_csv(settings["RAW_TRAINING_METADATA_PATH"])
# Label folds
y = self.meta_data['target']
folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=1)
kfold_indices = -1*np.ones(len(y))
for idx, (fold_train, fold_val) in enumerate(folds.split(y, y)):
kfold_indices[fold_val] = idx
self.meta_data['fold'] = kfold_indices
self.dataset_name = 'train'
def load_test_data(self):
"""Load the test metadata."""
self.meta_data = pd.read_csv(settings["RAW_TEST_METADATA_PATH"])
self.dataset_name = 'test'
def load_chunk(self, chunk_idx, load_flux_data=True):
"""Load a chunk from the test dataset.
I previously split up the dataset into smaller files that can be read
into memory.
By default, the flux data is loaded which takes a long time. That can
be turned off if desired.
"""
path = settings["SPLIT_TEST_PATH_FORMAT"] % chunk_idx
if load_flux_data:
self.flux_data = pd.read_hdf(path, 'df')
self.meta_data = pd.read_hdf(path, 'meta')
self.dataset_name = 'test_%04d' % chunk_idx
def load_augment(self, num_augments, base_name='train'):
"""Load an augmented dataset."""
dataset_name = '%s_augment_v%d_%d' % (
base_name, self._augment_version, num_augments
)
path = '%s/%s.h5' % (settings['AUGMENT_DIR'], dataset_name)
self.flux_data = pd.read_hdf(path, 'df')
self.meta_data = pd.read_hdf(path, 'meta')
self.dataset_name = dataset_name
@property
def features_path(self):
"""Path to the features file for this dataset"""
features_path = settings['FEATURES_PATH_FORMAT'] % (
self._features_version, self.dataset_name)
return features_path
def load_simple_features(self):
"""Load the features for a dataset and postprocess them.
This assumes that the features have already been created.
"""
self.raw_features = pd.read_hdf(self.features_path)
rf = self.raw_features
# Keys that we want to use in the prediction.
use_keys = [
'hostgal_photoz',
'hostgal_photoz_err',
'count',
]
features = rf[use_keys].copy()
features['length_scale'] = rf['gp_fit_1']
features['max_flux'] = rf['max_flux_3']
features['max_flux_ratio_r'] = (
(rf['max_flux_5'] - rf['max_flux_3']) /
(np.abs(rf['max_flux_5']) + np.abs(rf['max_flux_3']))
)
features['max_flux_ratio_b'] = (
(rf['max_flux_3'] - rf['max_flux_0']) /
(np.abs(rf['max_flux_3']) + np.abs(rf['max_flux_0']))
)
features['min_flux'] = rf['min_flux_3']
features['min_flux_ratio_r'] = (
(rf['min_flux_5'] - rf['min_flux_3']) /
(np.abs(rf['min_flux_5']) + np.abs(rf['min_flux_3']))
)
features['min_flux_ratio_b'] = (
(rf['min_flux_3'] - rf['min_flux_0']) /
(np.abs(rf['min_flux_3']) + np.abs(rf['min_flux_0']))
)
features['max_dt'] = rf['max_dt_5'] - rf['max_dt_0']
features['positive_width'] = rf['positive_width_3']
features['negative_width'] = rf['negative_width_3']
features['frac_time_fwd_0.8'] = rf['frac_time_fwd_0.8_3']
features['frac_time_fwd_0.5'] = rf['frac_time_fwd_0.5_3']
features['frac_time_fwd_0.2'] = rf['frac_time_fwd_0.2_3']
features['ratio_r_time_fwd_0.8'] = (
rf['frac_time_fwd_0.8_3'] / rf['frac_time_fwd_0.8_5'])
features['ratio_b_time_fwd_0.8'] = (
rf['frac_time_fwd_0.8_3'] / rf['frac_time_fwd_0.8_0'])
features['ratio_r_time_fwd_0.5'] = (
rf['frac_time_fwd_0.5_3'] / rf['frac_time_fwd_0.5_5'])
features['ratio_b_time_fwd_0.5'] = (
rf['frac_time_fwd_0.5_3'] / rf['frac_time_fwd_0.5_0'])
features['ratio_r_time_fwd_0.2'] = (
rf['frac_time_fwd_0.2_3'] / rf['frac_time_fwd_0.2_5'])
        features['ratio_b_time_fwd_0.2'] = (
            rf['frac_time_fwd_0.2_3'] / rf['frac_time_fwd_0.2_0'])
features['frac_time_bwd_0.8'] = rf['frac_time_bwd_0.8_3']
features['frac_time_bwd_0.5'] = rf['frac_time_bwd_0.5_3']
features['frac_time_bwd_0.2'] = rf['frac_time_bwd_0.2_3']
features['ratio_r_time_bwd_0.8'] = (
rf['frac_time_bwd_0.8_3'] / rf['frac_time_bwd_0.8_5'])
features['ratio_b_time_bwd_0.8'] = (
rf['frac_time_bwd_0.8_3'] / rf['frac_time_bwd_0.8_0'])
features['ratio_r_time_bwd_0.5'] = (
rf['frac_time_bwd_0.5_3'] / rf['frac_time_bwd_0.5_5'])
features['ratio_b_time_bwd_0.5'] = (
rf['frac_time_bwd_0.5_3'] / rf['frac_time_bwd_0.5_0'])
features['ratio_r_time_bwd_0.2'] = (
rf['frac_time_bwd_0.2_3'] / rf['frac_time_bwd_0.2_5'])
        features['ratio_b_time_bwd_0.2'] = (
            rf['frac_time_bwd_0.2_3'] / rf['frac_time_bwd_0.2_0'])
features['frac_s2n_5'] = rf['count_s2n_5'] / rf['count']
features['frac_s2n_-5'] = rf['count_s2n_-5'] / rf['count']
features['frac_background'] = rf['frac_background']
features['time_width_s2n_5'] = rf['time_width_s2n_5']
features['count_max_center'] = rf['count_max_center']
features['count_max_rise_20'] = rf['count_max_rise_20']
features['count_max_rise_50'] = rf['count_max_rise_50']
features['count_max_rise_100'] = rf['count_max_rise_100']
features['count_max_fall_20'] = rf['count_max_fall_20']
features['count_max_fall_50'] = rf['count_max_fall_50']
features['count_max_fall_100'] = rf['count_max_fall_100']
features['num_peaks'] = np.nanmedian([
rf['peaks_pos_0_count'],
rf['peaks_pos_1_count'],
rf['peaks_pos_2_count'],
rf['peaks_pos_3_count'],
rf['peaks_pos_4_count'],
rf['peaks_pos_5_count']
], axis=0)
features['peak_frac_2'] = np.nanmedian([
rf['peaks_pos_0_frac_2'],
rf['peaks_pos_1_frac_2'],
rf['peaks_pos_2_frac_2'],
rf['peaks_pos_3_frac_2'],
rf['peaks_pos_4_frac_2'],
rf['peaks_pos_5_frac_2']
], axis=0)
features['peak_frac_3'] = np.nanmedian([
rf['peaks_pos_0_frac_3'],
rf['peaks_pos_1_frac_3'],
rf['peaks_pos_2_frac_3'],
rf['peaks_pos_3_frac_3'],
rf['peaks_pos_4_frac_3'],
rf['peaks_pos_5_frac_3']
], axis=0)
features['total_s2n'] = (
rf['total_s2n_0'] +
rf['total_s2n_1'] +
rf['total_s2n_2'] +
rf['total_s2n_3'] +
rf['total_s2n_4'] +
rf['total_s2n_5']
)
self.features = features
def load_features(self):
"""Load the features for a dataset and postprocess them.
This assumes that the features have already been created.
"""
self.raw_features = pd.read_hdf(self.features_path)
# Drop keys that we don't want to use in the prediction
drop_keys = [
'object_id',
'hostgal_specz',
# 'hostgal_photoz',
# 'distmod',
'ra',
'decl',
'gal_l',
'gal_b',
'mwebv',
'ddf',
'max_time',
# 'hostgal_photoz',
]
features = self.raw_features
for key in drop_keys:
try:
features = features.drop(key, 1)
except KeyError:
# Key doesn't exist in this version. Ignore it.
pass
self.features = features
def _get_gp_data(self, object_meta, object_data, subtract_median=True):
times = []
fluxes = []
bands = []
flux_errs = []
# The zeropoints were arbitrarily set from the first image. Pick the
# 20th percentile of all observations in each channel as a new
# zeropoint. This has good performance when there are supernova-like
# bursts in the image, even if they are quite wide.
# UPDATE: when picking the 20th percentile, observations with just
# noise get really messed up. Revert back to the median for now and see
# if that helps. It doesn't really matter if supernovae go slightly
# negative...
for passband in range(num_passbands):
band_data = object_data[object_data['passband'] == passband]
if len(band_data) == 0:
# No observations in this band
continue
# ref_flux = np.percentile(band_data['flux'], 20)
ref_flux = np.median(band_data['flux'])
for idx, row in band_data.iterrows():
times.append(row['mjd'] - start_mjd)
flux = row['flux']
if subtract_median:
flux = flux - ref_flux
fluxes.append(flux)
bands.append(passband)
flux_errs.append(row['flux_err'])
times = np.array(times)
bands = np.array(bands)
fluxes = np.array(fluxes)
flux_errs = np.array(flux_errs)
# Guess the scale based off of the highest signal-to-noise point.
# Sometimes the edge bands are pure noise and can have large
# insignificant points.
scale = fluxes[np.argmax(fluxes / flux_errs)]
gp_data = {
'meta': object_meta,
'times': times,
'bands': bands,
'scale': scale,
'fluxes': fluxes,
'flux_errs': flux_errs,
}
return gp_data
def get_gp_data(self, idx, target=None, verbose=False,
subtract_median=True):
if target is not None:
target_data = self.meta_data[self.meta_data['target'] == target]
object_meta = target_data.iloc[idx]
else:
object_meta = self.meta_data.iloc[idx]
if verbose:
print(object_meta)
object_id = object_meta['object_id']
object_data = self.flux_data[self.flux_data['object_id'] == object_id]
return self._get_gp_data(object_meta, object_data)
def fit_gp(self, idx=None, target=None, object_meta=None, object_data=None,
verbose=False, guess_length_scale=20., fix_scale=False):
if idx is not None:
# idx was specified, pull from the internal data
gp_data = self.get_gp_data(idx, target, verbose)
else:
# The meta data and flux data can also be directly specified.
gp_data = self._get_gp_data(object_meta, object_data)
# GP kernel. We use a 2-dimensional Matern kernel to model the
# transient. The kernel amplitude is fixed to a fraction of the maximum
# value in the data, and the kernel width in the wavelength direction
# is also fixed. We fit for the kernel width in the time direction as
# different transients evolve on very different time scales.
kernel = ((0.2*gp_data['scale'])**2 *
kernels.Matern32Kernel([guess_length_scale**2, 5**2],
ndim=2))
# print(kernel.get_parameter_names())
if fix_scale:
kernel.freeze_parameter('k1:log_constant')
kernel.freeze_parameter('k2:metric:log_M_1_1')
gp = george.GP(kernel)
if verbose:
print(kernel.get_parameter_dict())
x_data = np.vstack([gp_data['times'], gp_data['bands']]).T
gp.compute(x_data, gp_data['flux_errs'])
fluxes = gp_data['fluxes']
def neg_ln_like(p):
gp.set_parameter_vector(p)
return -gp.log_likelihood(fluxes)
def grad_neg_ln_like(p):
gp.set_parameter_vector(p)
return -gp.grad_log_likelihood(fluxes)
# print(np.exp(gp.get_parameter_vector()))
bounds = [(0, np.log(1000**2))]
if not fix_scale:
bounds = [(-30, 30)] + bounds
fit_result = minimize(
neg_ln_like,
gp.get_parameter_vector(),
jac=grad_neg_ln_like,
# bounds=[(-30, 30), (0, 10), (0, 5)],
# bounds=[(0, 10), (0, 5)],
bounds=bounds,
# bounds=[(-30, 30), (0, np.log(1000**2))],
# options={'ftol': 1e-4}
)
if not fit_result.success:
print("Fit failed for %d!" % idx)
# print(-gp.log_likelihood(fluxes))
# print(np.exp(fit_result.x))
gp.set_parameter_vector(fit_result.x)
if verbose:
print(fit_result)
print(kernel.get_parameter_dict())
pred = []
pred_times = np.arange(end_mjd - start_mjd + 1)
for band in range(6):
pred_bands = np.ones(len(pred_times)) * band
pred_x_data = np.vstack([pred_times, pred_bands]).T
# pred, pred_var = gp.predict(fluxes, pred_x_data, return_var=True)
# band_pred, pred_var = gp.predict(fluxes, pred_x_data,
# return_var=True)
# band_pred = gp.predict(fluxes, pred_x_data, return_var=False)
band_pred = gp.predict(fluxes, pred_x_data, return_cov=False)
pred.append(band_pred)
pred = np.array(pred)
# Add results of the GP fit to the gp_data dictionary.
gp_data['pred_times'] = pred_times
gp_data['pred'] = pred
gp_data['fit_parameters'] = fit_result.x
return gp_data
def plot_gp(self, *args, **kwargs):
result = self.fit_gp(*args, **kwargs)
plt.figure()
for band in range(num_passbands):
cut = result['bands'] == band
color = 'C%d' % band
plt.errorbar(result['times'][cut], result['fluxes'][cut],
result['flux_errs'][cut], fmt='o', c=color)
plt.plot(result['pred_times'], result['pred'][band], c=color,
label=band)
plt.legend()
def plot_gp_interactive(self):
"""Make an interactive plot of the GP output.
This requires the ipywidgets package to be set up, and has only been
tested in jupyter-lab.
"""
from ipywidgets import interact, IntSlider, Dropdown, fixed
targets = np.unique(self.meta_data['target'])
idx_widget = IntSlider(min=0, max=1)
target_widget = Dropdown(options=targets, index=0)
def update_idx_range(*args):
idx_widget.max = np.sum(self.meta_data['target'] ==
target_widget.value) - 1
target_widget.observe(update_idx_range, 'value')
update_idx_range()
interact(self.plot_gp, idx=idx_widget, target=target_widget,
object_meta=fixed(None), object_data=fixed(None))
def extract_features(self, *args, **kwargs):
"""Extract features from a target"""
features = OrderedDict()
# Fit the GP and produce an output model
gp_data = self.fit_gp(*args, **kwargs)
times = gp_data['times']
fluxes = gp_data['fluxes']
flux_errs = gp_data['flux_errs']
bands = gp_data['bands']
s2ns = fluxes / flux_errs
pred = gp_data['pred']
meta = gp_data['meta']
# Add the object id. This shouldn't be used for training a model, but
# is necessary to identify which row is which when we split things up.
features['object_id'] = meta['object_id']
# Features from the meta data
features['hostgal_specz'] = meta['hostgal_specz']
features['hostgal_photoz'] = meta['hostgal_photoz']
features['hostgal_photoz_err'] = meta['hostgal_photoz_err']
features['ra'] = meta['ra']
features['decl'] = meta['decl']
features['gal_l'] = meta['gal_l']
features['gal_b'] = meta['gal_b']
features['distmod'] = meta['distmod']
features['mwebv'] = meta['mwebv']
features['ddf'] = meta['ddf']
# Count how many observations there are
features['count'] = len(fluxes)
# Features from GP fit parameters
for i, fit_parameter in enumerate(gp_data['fit_parameters']):
features['gp_fit_%d' % i] = fit_parameter
# Maximum fluxes and times.
max_times = np.argmax(pred, axis=1)
med_max_time = np.median(max_times)
max_dts = max_times - med_max_time
max_fluxes = np.array([pred[band, time] for band, time in
enumerate(max_times)])
features['max_time'] = med_max_time
for band, (max_flux, max_dt) in enumerate(zip(max_fluxes, max_dts)):
features['max_flux_%d' % band] = max_flux
features['max_dt_%d' % band] = max_dt
# Minimum fluxes.
min_fluxes = np.min(pred, axis=1)
for band, min_flux in enumerate(min_fluxes):
features['min_flux_%d' % band] = min_flux
# Calculate the positive and negative integrals of the lightcurve,
# normalized to the respective peak fluxes. This gives a measure of the
# "width" of the lightcurve, even for non-bursty objects.
positive_widths = np.sum(np.clip(pred, 0, None), axis=1) / max_fluxes
negative_widths = np.sum(np.clip(pred, None, 0), axis=1) / min_fluxes
for band in range(num_passbands):
features['positive_width_%d' % band] = positive_widths[band]
features['negative_width_%d' % band] = negative_widths[band]
# Find times to fractions of the peak amplitude
fractions = [0.8, 0.5, 0.2]
for band in range(num_passbands):
forward_times = find_time_to_fractions(pred[band], fractions)
backward_times = find_time_to_fractions(pred[band], fractions,
forward=False)
for fraction, forward_time, backward_time in \
zip(fractions, forward_times, backward_times):
features['frac_time_fwd_%.1f_%d' % (fraction, band)] = \
forward_time
features['frac_time_bwd_%.1f_%d' % (fraction, band)] = \
backward_time
# Count the number of data points with significant positive/negative
# fluxes
thresholds = [-20, -10, -5, -3, 3, 5, 10, 20]
for threshold in thresholds:
if threshold < 0:
count = np.sum(s2ns < threshold)
else:
count = np.sum(s2ns > threshold)
features['count_s2n_%d' % threshold] = count
# Count the fraction of data points that are "background", i.e. less
# than a 3 sigma detection of something.
features['frac_background'] = np.sum(np.abs(s2ns) < 3) / len(s2ns)
# Sum up the total signal-to-noise in each band
for band in range(6):
mask = bands == band
band_fluxes = fluxes[mask]
band_flux_errs = flux_errs[mask]
total_band_s2n = np.sqrt(np.sum((band_fluxes / band_flux_errs)**2))
features['total_s2n_%d' % band] = total_band_s2n
# Count the time delay between the first and last significant fluxes
thresholds = [5, 10, 20]
for threshold in thresholds:
            significant_times = times[np.abs(s2ns) > threshold]
"""
Created on 25/05/2015
@author: vgil
"""
from optparse import OptionParser
from anmichelpers.parsers.pronmd import ProdyNMDParser
import numpy
from anmichelpers.writers.pronmd import ProdyNMDWriter
def fill_with_zeros(evecs, from_res, to_res, width):
"""
Pads an eigenvector array with 0s .
from_res starts from 1
"""
left_padding = [0.]*((from_res-1)*3)
right_padding = [0.]*((width-to_res)*3)
new_evecs = []
for evec in evecs:
new_evec = []
new_evec.extend(left_padding)
new_evec.extend(evec)
new_evec.extend(right_padding)
new_evecs.append(new_evec)
    return numpy.array(new_evecs)
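
# Minimal usage sketch (added; values are illustrative): pad a single 2-residue mode so it
# lives on a 4-residue system, occupying residues 2-3.
def _example_fill_with_zeros():
    evecs = [[1., 0., 0., 0., 1., 0.]]  # one mode, 2 residues x 3 coordinates
    return fill_with_zeros(evecs, from_res=2, to_res=3, width=4)
    # -> array([[0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 0.]])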
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.3
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/pyro_intro.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="BGe00SEV5KTl"
# [Pyro](https://pyro.ai/) is a probabilistic programming system built on top of PyTorch. It supports posterior inference based on MCMC and stochastic variational inference; discrete latent variables can be marginalized out exactly using dynamic programming.
# + colab={"base_uri": "https://localhost:8080/"} id="P_uM0GuF5ZWr" outputId="4cafba49-31b3-4fcb-d5e5-a151912d2224"
# !pip install pyro-ppl
# + id="3SBZ5Pmr5F0N"
import matplotlib.pyplot as plt
import numpy as np
import torch
import pyro
import pyro.infer
import pyro.optim
import pyro.distributions as dist
from torch.distributions import constraints
from pyro.infer import MCMC, NUTS, Predictive, HMC
from pyro.infer import SVI, Trace_ELBO
from pyro.infer import EmpiricalMarginal
from pyro.distributions import Beta, Binomial, HalfCauchy, Normal, Pareto, Uniform
from pyro.distributions.util import scalar_like
from pyro.infer.mcmc.util import initialize_model, summary
from pyro.util import ignore_experimental_warning
pyro.set_rng_seed(101)
# + [markdown] id="FE8OW3Zd50Nn"
# # Example: inferring mean of 1d Gaussian
#
# We use the simple example from the [Pyro intro](https://pyro.ai/examples/intro_part_ii.html#A-Simple-Example). The goal is to infer the weight $\theta$ of an object, given noisy measurements $y$. We assume the following model:
# $$
# \begin{align}
# \theta &\sim N(\mu=8.5, \tau^2=1.0)\\
# y &\sim N(\theta, \sigma^2=0.75^2)
# \end{align}
# $$
#
# Where $\mu=8.5$ is the initial guess.
#
#
# + id="hYMFFGAMV0fW"
def model(hparams, data=None):
prior_mean, prior_sd, obs_sd = hparams
theta = pyro.sample("theta", dist.Normal(prior_mean, prior_sd))
y = pyro.sample("y", dist.Normal(theta, obs_sd), obs=data)
return y
# + [markdown] id="4511pkTB9GYC"
# ## Exact inference
#
# By Bayes rule for Gaussians, we know that the exact posterior,
# given a single observation $y=9.5$, is given by
#
#
# $$
# \begin{align}
# \theta|y &\sim N(m, s^2) \\
# m &=\frac{\sigma^2 \mu + \tau^2 y}{\sigma^2 + \tau^2}
# = \frac{0.75^2 \times 8.5 + 1 \times 9.5}{0.75^2 + 1^2}
# = 9.14 \\
# s^2 &= \frac{\sigma^2 \tau^2}{\sigma^2 + \tau^2}
# = \frac{0.75^2 \times 1^2}{0.75^2 + 1^2}= 0.6^2
# \end{align}
# $$
# + colab={"base_uri": "https://localhost:8080/"} id="qNZ4aNNj9M-2" outputId="2d2ee5cf-2dc1-49da-93fa-574577b812f8"
mu = 8.5; tau = 1.0; sigma = 0.75;
hparams = (mu, tau, sigma)
y = 9.5
m = (sigma**2 * mu + tau**2 * y)/(sigma**2 + tau**2) # posterior mean
s2 = (sigma**2 * tau**2)/(sigma**2 + tau**2) # posterior variance
s = np.sqrt(s2)
print(m)
print(s)
# + [markdown] id="tFIu6O-H8YFM"
# ## Ancestral sampling
# + colab={"base_uri": "https://localhost:8080/"} id="6dMY6oxJ8bEZ" outputId="3bb6bb9a-1793-4f92-aa9c-bbc4998064e0"
def model2(hparams, data=None):
prior_mean, prior_sd, obs_sd = hparams
theta = pyro.sample("theta", dist.Normal(prior_mean, prior_sd))
y = pyro.sample("y", dist.Normal(theta, obs_sd), obs=data)
return theta, y
for i in range(5):
theta, y = model2(hparams)
print([theta, y])
# + [markdown] id="r6ZtMB_cGhz4"
# ## MCMC
#
# See [the documentation](http://docs.pyro.ai/en/stable/mcmc.html)
#
# + colab={"base_uri": "https://localhost:8080/"} id="WQIhsROHH4uG" outputId="1ec3aa8e-dbdb-4f97-e626-21cb3f69c6e5"
nuts_kernel = NUTS(model)
obs = torch.tensor(y)
mcmc = MCMC(nuts_kernel, num_samples=1000, warmup_steps=50)
mcmc.run(hparams, obs)
print(type(mcmc))
# + colab={"base_uri": "https://localhost:8080/"} id="K46LfO1rR-ty" outputId="ad6f1d3e-d8a3-4341-8fc7-93aec9c403a0"
samples = mcmc.get_samples()
print(type(samples))
print(samples.keys())
print(samples['theta'].shape)
# + colab={"base_uri": "https://localhost:8080/"} id="E1go0L6Szh-f" outputId="1b34fe04-bc90-4366-ae88-630093ba5f40"
mcmc.diagnostics()
# + colab={"base_uri": "https://localhost:8080/"} id="MPA4YwjaSrkp" outputId="5abaf70f-ac02-4165-be54-5610bb473471"
thetas = samples['theta'].numpy()
print(np.mean(thetas))
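
# + [markdown]
# Sanity check (added sketch): the MCMC estimates of the posterior mean and standard
# deviation should be close to the exact values `m` and `s` computed analytically above.

# +
print("MCMC posterior mean: %.3f (exact %.3f)" % (np.mean(thetas), m))
print("MCMC posterior sd:   %.3f (exact %.3f)" % (np.std(thetas), s))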
import scipy.signal
import numpy as np
import matplotlib.pyplot as plt
from pylab import *
import numpy.ma as ma
#Applies a boxcar smooth of length nsmooth to the vector x
#returns the smoothed vector
def smooth(x, nsmooth):
#interpolates over masked values
if (sum(x==0)> 0)&(sum(x)>0):
bpix = x==0.0
gpix = ~bpix
gx = x[gpix]
interp = np.interp(bpix.nonzero()[0], gpix.nonzero()[0], gx)
x[bpix] = np.float32(interp)
return scipy.signal.medfilt(x, nsmooth) #median filters the data
def diagnostics_plot(D, M, indmax, outlier_array, f_opt, profile):
indmax = np.argmax(outlier_array) #finds biggest outlier
indmax = unravel_index(indmax, outlier_array.shape) #converts it from flat to tuple
plt.subplot(221)
plt.title("Raw Data")
plt.imshow(D,vmin=0, vmax=50)
plt.scatter(x = indmax[1], y = indmax[0], color='w', marker='x')
m = cm.ScalarMappable(cmap= cm.jet)
m.set_array(D)
plt.colorbar(m)
plt.subplot(222)
plt.title("Outliers")
plt.imshow(M*outlier_array, vmin=0, vmax=20)
plt.scatter(x = indmax[1], y = indmax[0], color='w', marker='x')
m.set_array(M*outlier_array)
plt.colorbar(m)
plt.subplot(222)
plt.subplot(223)
plt.title("Cut in spatial direction")
plt.axvline(x = indmax[0], color="red")
plt.plot(D[:, indmax[1]], label = "data")
plt.plot((f_opt*profile)[:, indmax[1]], color="orange", label= "model")
plt.legend()
plt.xlabel('Wavelength [um]')
plt.ylabel('Counts')
plt.subplot(224)
plt.title("Outliers: cut in spatial direction")
plt.plot(outlier_array[:,indmax[1]])
plt.axvline(x = indmax[0], color="red")
plt.ylabel('Residuals')
plt.xlabel('Wavelength [um]')
plt.tight_layout()
plt.show()
plt.clf()
"""Function to optimally extract a spectrum:
Inputs:
D: data array (already background subtracted)
err: error array (in addition to photon noise; e.g. error due to background subtraction)
f_std: box-extracted spectrum (from step 4 of Horne)
var_std: variance of standard spectrum (also from step 4)
M: array masking bad pixels; 0 is bad and 1 is good
nsmooth: number of pixels to smooth over to estimate the spatial profile (7 works well)
sig_cut: cutoff sigma for flagging outliers (10.0 works well)
diagnostics: boolean flag specifying whether to make diagnostic plots
outputs:
f_opt, var_opt: optimally extracted spectrum and its variance"""
def optextr(D, err, f_std, var_std, M, nsmooth, sig_cut, diagnostics):
#STEPS 5-8: estimating spatial profile and removing cosmic rays
    f_opt = np.copy(f_std)
import logging
from typing import Union
from UQpy.inference.information_criteria import AIC
from UQpy.inference.information_criteria.baseclass.InformationCriterion import InformationCriterion
from beartype import beartype
from UQpy.inference.MLE import MLE
import numpy as np
class InformationModelSelection:
# Authors: <NAME>, <NAME>
# Last Modified: 12/19 by <NAME>
@beartype
def __init__(
self,
parameter_estimators: list[MLE],
criterion: InformationCriterion = AIC(),
n_optimizations: list[int] = None,
initial_parameters: list[np.ndarray] = None
):
"""
Perform model selection using information theoretic criteria.
Supported criteria are :class:`.BIC`, :class:`.AIC` (default), :class:`.AICc`. This class leverages the
:class:`.MLE` class for maximum likelihood estimation, thus inputs to :class:`.MLE` can also be provided to
:class:`InformationModelSelection`, as lists of length equal to the number of models.
:param parameter_estimators: A list containing a maximum-likelihood estimator (:class:`.MLE`) for each one of the
models to be compared.
:param criterion: Criterion to be used (:class:`.AIC`, :class:`.BIC`, :class:`.AICc)`. Default is :class:`.AIC`
:param initial_parameters: Initial guess(es) for optimization, :class:`numpy.ndarray` of shape
:code:`(nstarts, n_parameters)` or :code:`(n_parameters, )`, where :code:`nstarts` is the number of times the
optimizer will be called. Alternatively, the user can provide input `n_optimizations` to randomly sample
initial guess(es). The identified MLE is the one that yields the maximum log likelihood over all calls of the
optimizer.
"""
self.candidate_models = [mle.inference_model for mle in parameter_estimators]
self.models_number = len(parameter_estimators)
self.criterion: InformationCriterion = criterion
self.logger = logging.getLogger(__name__)
self.n_optimizations = n_optimizations
self.initial_parameters= initial_parameters
self.parameter_estimators: list = parameter_estimators
""":class:`.MLE` results for each model (contains e.g. fitted parameters)"""
# Initialize the outputs
self.criterion_values: list = [None, ] * self.models_number
"""Value of the criterion for all models."""
self.penalty_terms: list = [None, ] * self.models_number
"""Value of the penalty term for all models. Data fit term is then criterion_value - penalty_term."""
self.probabilities: list = [None, ] * self.models_number
"""Value of the model probabilities, computed as
.. math:: P(M_i|d) = \dfrac{\exp(-\Delta_i/2)}{\sum_i \exp(-\Delta_i/2)}
where :math:`\Delta_i = criterion_i - min_i(criterion)`"""
# Run the model selection procedure
if (self.n_optimizations is not None) or (self.initial_parameters is not None):
self.run(self.n_optimizations, self.initial_parameters)
def run(self, n_optimizations: list[int], initial_parameters: list[np.ndarray]=None):
"""
Run the model selection procedure, i.e. compute criterion value for all models.
This function calls the :meth:`run` method of the :class:`.MLE` object for each model to compute the maximum
log-likelihood, then computes the criterion value and probability for each model. If `data` are given when
creating the :class:`.MLE` object, this method is called automatically when the object is created.
:param n_optimizations: Number of iterations that the optimization is run, starting at random initial
guesses. It is only used if `initial_parameters` is not provided. Default is :math:`1`.
The random initial guesses are sampled uniformly between :math:`0` and :math:`1`, or uniformly between
user-defined bounds if an input bounds is provided as a keyword argument to the `optimizer` input parameter.
:param initial_parameters: Initial guess(es) for optimization, :class:`numpy.ndarray` of shape
:code:`(nstarts, n_parameters)` or :code:`(n_parameters, )`, where :code:`nstarts` is the number of times the
optimizer will be called. Alternatively, the user can provide input `n_optimizations` to randomly sample
initial guess(es). The identified MLE is the one that yields the maximum log likelihood over all calls of the
optimizer.
"""
if (n_optimizations is not None and (len(n_optimizations) != len(self.parameter_estimators))) or \
(initial_parameters is not None and len(initial_parameters) != len(self.parameter_estimators)):
raise ValueError("The length of n_optimizations and initial_parameters should be equal to the number of "
"parameter estimators")
# Loop over all the models
for i, parameter_estimator in enumerate(self.parameter_estimators):
# First evaluate ML estimate for all models, do several iterations if demanded
parameters = None
if initial_parameters is not None:
parameters = initial_parameters[i]
optimizations = 0
if n_optimizations is not None:
optimizations = n_optimizations[i]
parameter_estimator.run(n_optimizations=optimizations, initial_parameters=parameters)
# Then minimize the criterion
self.criterion_values[i], self.penalty_terms[i] = \
self.criterion.minimize_criterion(data=parameter_estimator.data,
parameter_estimator=parameter_estimator,
return_penalty=True)
# Compute probabilities from criterion values
self.probabilities = self._compute_probabilities(self.criterion_values)
def sort_models(self):
"""
Sort models in descending order of model probability (increasing order of `criterion` value).
This function sorts - in place - the attribute lists :py:attr:`.candidate_models`, :py:attr:`.ml_estimators`,
:py:attr:`criterion_values`, :py:attr:`penalty_terms` and :py:attr:`probabilities` so that they are sorted from
most probable to least probable model. It is a stand-alone function that is provided to help the user to easily
visualize which model is the best.
No inputs/outputs.
"""
sort_idx = list(np.argsort(np.array(self.criterion_values)))
self.candidate_models = [self.candidate_models[i] for i in sort_idx]
self.parameter_estimators = [self.parameter_estimators[i] for i in sort_idx]
self.criterion_values = [self.criterion_values[i] for i in sort_idx]
self.penalty_terms = [self.penalty_terms[i] for i in sort_idx]
self.probabilities = [self.probabilities[i] for i in sort_idx]
@staticmethod
def _compute_probabilities(criterion_values):
delta = np.array(criterion_values) - min(criterion_values)
prob = np.exp(-delta / 2)
        return prob / np.sum(prob)
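
# Small illustration (added; not part of the UQpy API): Akaike-style model weights computed
# from raw criterion values. For AIC values [100, 102, 110] the weights are roughly
# [0.73, 0.27, 0.005], i.e. the lowest-criterion model absorbs most of the probability mass.
def _example_model_probabilities():
    return InformationModelSelection._compute_probabilities([100.0, 102.0, 110.0])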
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib as mpl
from matplotlib import colors
from collections import OrderedDict
from tkinter import filedialog, Tk
from scipy.optimize import curve_fit
import netCDF4
# Some plot properties to make them a bit nicer.
plt.ion()
plt.rcParams['font.family'] = 'serif'
fontsize = 12
ms = 2
lw = 5
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Scale the tableau20 RGBs to numbers between (0,1) since this is how mpl accepts them.
for i in range(len(tableau20)):
r, g, b = tableau20[i]
tableau20[i] = (r / 255., g / 255., b / 255.)
class Readout:
"""
Class object that holds a figure of a grid of plots from the netCDF
output of 3DLIM. Example usage in controlling script lim_readout.py.
"""
def __init__(self, netcdf_file=None, dat_file=None, lim_file=None,
figsize=(15,10), grid_shape=(3,3)):
"""
netcdf_file: Path to the 3DLIM netCDF file. If None is entered then
it will use 'colprobe-test-m2.nc' as a default, which
is convienent for testing.
dat_file: Path to the casename.dat file. This has things like the
duration of the run in it.
lim_file: Path to the casename.lim file. This has the forces in it,
among many, many other things.
figsize: Size of the figure to hold all the plots. The default of
(15, 10) is a good size.
grid_shape: The shape of the grid of plots. Change if you want to
add more plots or whatever.
"""
# Create the master figure to hold all the plots.
self.master_fig = plt.figure(figsize=figsize)
# If no netCDF file given, just use this test one and the dat file.
if netcdf_file is None:
self.netcdf = netCDF4.Dataset('colprobe-test-m2.nc')
with open('colprobe-test-m2.dat') as f:
self.dat = f.read()
else:
self.netcdf = netCDF4.Dataset(netcdf_file)
# Get the .dat file info as well, if supplied, otherwise it's None.
if dat_file:
with open(dat_file) as f:
self.dat = f.read()
else:
self.dat = None
# Same with .lim file.
if lim_file:
with open(lim_file) as f:
self.lim = f.read()
else:
self.lim = None
# Create figure with array of empty plots.
for plot_num in range(1, grid_shape[0] * grid_shape[1] + 1):
self.master_fig.add_subplot(grid_shape[0], grid_shape[1], plot_num)
def print_readout(self):
"""
Output a table with relevant info from the netcdf file.
"""
# Let's just put everything we want into a dict so printing is easy.
output = OrderedDict()
output['3DLIM Version'] = self.netcdf['VERSION'][:].data.tostring().decode()
output['Title'] = self.netcdf['TITLE'][:].data.tostring().decode()
output['File'] = self.netcdf['JOB'][:].data.tostring().decode().split(' ')[0]
#output['Particles'] = format(self.netcdf['MAXIMP'][:].data, ',')
output['Conn. Length'] = self.netcdf['CL'][:].data
if self.dat:
# Get the total CPU time used.
try:
time = int(self.dat.split('TOTAL CPU TIME USED (S)')[1].split('\n')[0])
output['Time'] = str(time) + 's (' + format(time/3600, '.2f') + ' hours)'
except:
pass
try:
num = int(self.dat.split('NO OF IMPURITY IONS TO FOLLOW')[1].split('\n')[0])
output['No. Imp. Ions'] = "{:,}".format(num)
except:
pass
# Find longest output for formatting.
pad = 0
for val in output.values():
if len(str(val)) > pad:
pad = len(str(val))
# Printing commands.
num_stars = 2 + 15 + 2 + pad
print("\n" + "*"*num_stars)
for key, val in output.items():
print("* {:15}{:<{pad}} *".format(key, val, pad=pad))
print("*"*num_stars)
# Also while we're here put the figure title as the filename.
self.master_fig.subplots_adjust(top=0.60)
self.master_fig.suptitle(output['Title'], fontsize=26)
def centerline(self, plot_num, mult_runs=False, log=False, fit_exp=False):
"""
Plot the ITF and OTF deposition along the centerlines.
plot_num: Location in grid to place this plot. I.e. if the grid_shape
is (3,3), then enter a number between 0-8, where the locations
are labelled left to right.
"""
#The deposition array.
#dep_arr = np.array(self.netcdf.variables['NERODS3'][0] * -1)
dep_arr = self.get_dep_array(mult_runs)
# Location of each P bin, and its width. Currently they all have the same width,
# but it may end up such that there are custom widths so we leave it like this.
ps = np.array(self.netcdf.variables['PS'][:].data)
pwids = np.array(self.netcdf.variables['PWIDS'][:].data)
# Array of poloidal locations (i.e. the center of each P bin).
pol_locs = ps - pwids/2.0
# Drop last row since it's garbage.
dep_arr = dep_arr[:-1, :]
pol_locs = pol_locs[:-1]
# Distance cell centers along surface (i.e. the radial locations).
rad_locs = np.array(self.netcdf.variables['ODOUTS'][:].data)
# Get the centerline index (or closest to it).
cline = np.abs(pol_locs).min()
# Index the deposition array at the centerline for plotting.
itf_x = rad_locs[np.where(rad_locs > 0.0)[0]]
itf_y = dep_arr[np.where(pol_locs == cline)[0], np.where(rad_locs > 0.0)[0]]
otf_x = rad_locs[np.where(rad_locs < 0.0)[0]] * -1
otf_y = dep_arr[np.where(pol_locs == cline)[0], np.where(rad_locs < 0.0)[0]]
# Plotting commands.
ax = self.master_fig.axes[plot_num]
if log:
ax.semilogy(itf_x*100, itf_y, '-', label='ITF', ms=ms, color=tableau20[6])
ax.semilogy(otf_x*100, otf_y, '-', label='OTF', ms=ms, color=tableau20[8])
else:
ax.plot(itf_x*100, itf_y, '-', label='ITF', ms=ms, color=tableau20[6])
ax.plot(otf_x*100, otf_y, '-', label='OTF', ms=ms, color=tableau20[8])
ax.legend(fontsize=fontsize)
ax.set_xlabel('Distance along probe (cm)', fontsize=fontsize)
ax.set_ylabel('Deposition (arbitrary units)', fontsize=fontsize)
ax.set_xlim([0, 10])
ax.set_ylim([0,None])
# Option to perform an exponential fit to the data.
if fit_exp:
def exp_fit(x, a, b):
return a * np.exp(-b * x)
popt_itf, pcov_itf = curve_fit(exp_fit, itf_x, itf_y, maxfev=5000)
popt_otf, pcov_otf = curve_fit(exp_fit, otf_x, otf_y, maxfev=5000)
fitx = np.linspace(0, 0.1, 100)
fity_itf = exp_fit(fitx, *popt_itf)
fity_otf = exp_fit(fitx, *popt_otf)
if log:
ax.semilogy(fitx*100, fity_itf, '--', ms=ms, color=tableau20[6])
ax.semilogy(fitx*100, fity_otf, '--', ms=ms, color=tableau20[8])
else:
ax.plot(fitx*100, fity_itf, '--', ms=ms, color=tableau20[6])
ax.plot(fitx*100, fity_otf, '--', ms=ms, color=tableau20[8])
print("Lambdas")
print(" ITF = {:.2f}".format(1/popt_itf[1]*100))
print(" OTF = {:.2f}".format(1/popt_otf[1]*100))
#print("Max ITF/OTF: {:.2f}".format(itf_y.max()/otf_y.max()))
print("Total ITF/OTF: {:.2f}".format(itf_y.sum()/otf_y.sum()))
return {'itf_x':itf_x, 'itf_y':itf_y, 'otf_x':otf_x, 'otf_y':otf_y}
def deposition_contour(self, plot_num, side, probe_width=0.015, rad_cutoff=0.1, mult_runs=False):
"""
Plot the 2D tungsten distribution across the face.
plot_num: Location in grid to place this plot. I.e. if the grid_shape
is (3,3), then enter a number between 0-8, where the locations
are labelled left to right.
side: Either 'ITF' or 'OTF'.
probe_width: The half-width of the collector probe (the variable CPCO).
A = 0.015, B = 0.005, C = 0.0025
rad_cutoff: Only plot data from the tip down to rad_cutoff. Useful
if we want to compare to LAMS since those scans only go
down a certain length of the probe.
*** To-do ***
- Instead of entering the width, pull out CPCO(?) from the netcdf file.
Need to figure out the points being deposited outside the expected
probe width first though.
"""
#The deposition array.
#dep_arr = np.array(self.netcdf.variables['NERODS3'][0] * -1)
dep_arr = self.get_dep_array(mult_runs)
# Location of each P bin, and its width. Currently they all have the same width,
# but it may end up such that there are custom widths so we leave it like this.
ps = np.array(self.netcdf.variables['PS'][:].data)
pwids = np.array(self.netcdf.variables['PWIDS'][:].data)
# Array of poloidal locations (i.e. the center of each P bin).
pol_locs = ps - pwids/2.0
# Drop last row since it's garbage.
dep_arr = dep_arr[:-1, :]
pol_locs = pol_locs[:-1]
# Distance cell centers along surface (i.e. the radial locations).
rad_locs = np.array(self.netcdf.variables['ODOUTS'][:].data)
# Remove data beyond rad_cutoff.
idx = np.where(np.abs(rad_locs)<rad_cutoff)[0]
rad_locs = rad_locs[idx]
dep_arr = dep_arr[:, idx]
# Get only positive values of rad_locs for ITF...
idx = np.where(rad_locs > 0.0)[0]
X_itf, Y_itf = np.meshgrid(rad_locs[idx], pol_locs)
Z_itf = dep_arr[:, idx]
# ... negative for OTF.
idx = np.where(rad_locs < 0.0)[0]
X_otf, Y_otf = np.meshgrid(np.abs(rad_locs[idx][::-1]), pol_locs)
Z_otf = dep_arr[:, idx][:, ::-1]
# Make the levels for the contour plot out of whichever side has the max deposition.
if Z_itf.max() > Z_otf.max():
levels = np.linspace(0, Z_itf.max(), 15)
else:
levels = np.linspace(0, Z_otf.max(), 15)
# Plotting commands.
if side == 'ITF':
X = X_itf; Y = Y_itf; Z = Z_itf
else:
X = X_otf; Y = Y_otf; Z = Z_otf
ax = self.master_fig.axes[plot_num]
ax.contourf(X*100, Y*100, Z, levels=levels, cmap='Reds')
ax.set_xlabel('Distance along probe (cm)', fontsize=fontsize)
ax.set_ylabel('Z location (cm)', fontsize=fontsize)
ax.set_ylim([-probe_width*100, probe_width*100])
props = dict(facecolor='white')
ax.text(0.75, 0.85, side, bbox=props, fontsize=fontsize*1.5, transform=ax.transAxes)
# Print out the total amount collected.
#print("Total W Deposited ({:}): {:.2f}".format(side, Z.sum()))
def velocity_contour_pol(self, pol_slice=0):
"""
Plot the 2D distribution of the (tungsten? plasma?) velocity at a
poloidal slice.
pol_slice: The poloidal coordinate to get a velocity plot in (R, B) space.
"""
pass
def velocity_contour_par(self, par_slice=0):
"""
Plot the 2D distribution of the (tungsten? plasma?) velocity at a
parallel (to B) slice.
par_slice: The parallel coordinate to get a velocity plot in (R, P) space.
"""
pass
def te_plot(self):
"""
Plot the input Te (which is at the midplane?).
"""
pass
def ne_plot(self):
"""
Plot the input ne (which is at the midplane?).
"""
pass
def te_contour(self, plot_num):
"""
Plot the 2D background electron plasma temperature.
plot_num: Location in grid to place this plot. I.e. if the grid_shape
is (3,3), then enter a number between 0-8, where the locations
are labelled left to right.
"""
# Get the connection length to restrict the plot between the two absorbing surfaces.
cl = float(self.netcdf['CL'][:].data)
# Same with the location of the plasma center (the top of the box).
ca = float(self.netcdf['CA'][:].data)
# Get the X and Y grid data.
x = self.netcdf.variables['XOUTS'][:].data
y = self.netcdf.variables['YOUTS'][:].data
# 2D grid of the temperature data.
Z = self.netcdf.variables['CTEMBS'][:].data
# Trim the zeros from the edges of the x and y arrays, and the associated
# data points as well. This is done to stop this data from messing up
# the contours in the contour plot.
xkeep_min = np.nonzero(x)[0].min()
xkeep_max = np.nonzero(x)[0].max()
ykeep_min = np.nonzero(y)[0].min()
ykeep_max = np.nonzero(y)[0].max()
x = x[xkeep_min:xkeep_max]
y = y[ykeep_min:ykeep_max]
Z = Z[ykeep_min:ykeep_max, xkeep_min:xkeep_max]
# Furthermore, trim the data off that is beyond CL.
ykeep_cl = np.where(np.abs(y) < cl)[0]
y = y[ykeep_cl]
Z = Z[ykeep_cl, :]
# Replace zeros in Z with just the smallest density value. Again to
# stop all these zeros from messing up the contour levels.
try:
Zmin = np.partition(np.unique(Z), 1)[1]
Z = np.clip(Z, Zmin, None)
except:
pass
# Create grid for plotting. Note we swap definitions for x and y since
# we want the x-axis in the plot to be the parallel direction (it just
# looks better that way).
Y, X = np.meshgrid(x, y)
# Plotting commands.
ax = self.master_fig.axes[plot_num]
cont = ax.contourf(X, Y, Z, cmap='magma', levels=10)
ax.set_xlim([-cl, cl])
#ax.set_ylim([None, ca])
ax.set_ylim([None, 0.01]) # Contour weird near edge.
ax.set_xlabel('Parallel (m)', fontsize=fontsize)
ax.set_ylabel('Radial (m)', fontsize=fontsize)
cbar = self.master_fig.colorbar(cont, ax=ax)
cbar.set_label('Background Te (eV)')
def ne_contour(self, plot_num):
"""
Plot the 2D background plasma density.
plot_num: Location in grid to place this plot. I.e. if the grid_shape
is (3,3), then enter a number between 0-8, where the locations
are labelled left to right.
"""
# Get the connection length to restrict the plot between the two absorbing surfaces.
cl = float(self.netcdf['CL'][:].data)
# Same with the location of the plasma center (the top of the box)
ca = float(self.netcdf['CA'][:].data)
# Get the X and Y grid data.
x = self.netcdf.variables['XOUTS'][:].data
y = self.netcdf.variables['YOUTS'][:].data
# 2D grid of the temperature data.
Z = self.netcdf.variables['CRNBS'][:].data
# Trim the zeros from the edges of the x and y arrays, and the associated
# data points as well. This is done to stop this data from messing up
# the contours in the contour plot.
xkeep_min = np.nonzero(x)[0].min()
xkeep_max = np.nonzero(x)[0].max()
ykeep_min = np.nonzero(y)[0].min()
ykeep_max = np.nonzero(y)[0].max()
x = x[xkeep_min:xkeep_max]
y = y[ykeep_min:ykeep_max]
Z = Z[ykeep_min:ykeep_max, xkeep_min:xkeep_max]
# Furthermore, trim the data off that is beyond CL.
ykeep_cl = np.where(np.abs(y) < cl)[0]
y = y[ykeep_cl]
Z = Z[ykeep_cl, :]
# Replace zeros in Z with just the smallest density value. Again to
# stop all these zeros from messing up the contour levels.
Zmin = np.partition(np.unique(Z), 1)[1]
Z = np.clip(Z, Zmin, None)
# Create grid for plotting. Note we swap definitions for x and y since
# we want the x-axis in the plot to be the parallel direction (it just
# looks better that way).
Y, X = np.meshgrid(x, y)
# Plotting commands.
ax = self.master_fig.axes[plot_num]
# Create our own levels since the automatic ones are bad.
lev_exp = np.arange(np.floor(np.log10(Z.min())-1), np.ceil(np.log10(Z.max())+1), 0.25)
levs = np.power(10, lev_exp)
cont = ax.contourf(X, Y, Z, cmap='magma', levels=levs, norm=colors.LogNorm())
ax.set_xlim([-cl, cl])
#ax.set_ylim([None, ca])
ax.set_ylim([None, 0.01]) # Contour weird near edge.
ax.set_xlabel('Parallel (m)', fontsize=fontsize)
ax.set_ylabel('Radial (m)', fontsize=fontsize)
cbar = self.master_fig.colorbar(cont, ax=ax)
cbar.set_label('Background ne (m-3)')
def avg_imp_vely(self, plot_num):
"""
SVYBAR: Average impurity velocity at X coordinates in QXS.
plot_num: Location in grid to place this plot. I.e. if the grid_shape
is (3,3), then enter a number between 0-8, where the locations
are labelled left to right.
"""
# Grab the data.
x = self.netcdf.variables['QXS'][:].data
y = self.netcdf.variables['SVYBAR'][:].data
# Plotting commands.
ax = self.master_fig.axes[plot_num]
ax.plot(x, y, '.', ms=ms, color=tableau20[6])
ax.set_xlabel('Radial coordinates (m)', fontsize=fontsize)
ax.set_ylabel('Average Y imp. vel. (m/s)', fontsize=fontsize)
def avg_pol_profiles(self, plot_num, probe_width=0.015, rad_cutoff=0.5):
"""
Plot the average poloidal profiles for each side. Mainly to see if
deposition peaks on the edges.
plot_num: Location in grid to place this plot. I.e. if the grid_shape
is (3,3), then enter a number between 0-8, where the locations
are labelled left to right.
probe_width: The half-width of the collector probe (the variable CPCO).
A = 0.015, B = 0.005, C = 0.0025
rad_cutoff: Only plot data from the tip down to rad_cutoff. Useful
if we want to compare to LAMS since those scans only go
down a certain length of the probe.
"""
        # Code copied from the deposition_contour function above; see it for comments.
dep_arr = np.array(self.netcdf.variables['NERODS3'][0] * -1)
ps = np.array(self.netcdf.variables['PS'][:].data)
pwids = np.array(self.netcdf.variables['PWIDS'][:].data)
pol_locs = ps - pwids/2.0
dep_arr = dep_arr[:-1, :]
pol_locs = pol_locs[:-1]
rad_locs = np.array(self.netcdf.variables['ODOUTS'][:].data)
idx = np.where(np.abs(rad_locs)<rad_cutoff)[0]
rad_locs = rad_locs[idx]
dep_arr = dep_arr[:, idx]
idx = np.where(rad_locs > 0.0)[0]
X_itf, Y_itf = np.meshgrid(rad_locs[idx], pol_locs)
Z_itf = dep_arr[:, idx]
        idx = np.where(rad_locs < 0.0)[0]
#!/usr/bin/env python
"""Utility functions and classes for creating mixture models"""
########################################################################
# File: mixture_model.py
#
# Author: <NAME>
# History: 1/7/19 Created
########################################################################
import os
import sys
import numpy as np
import pandas as pd
from argparse import ArgumentParser
from sklearn.mixture import GaussianMixture
from timeit import default_timer as timer
from py3helpers.utils import load_json, create_dot_dict
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
from signalalign.hiddenMarkovModel import HmmModel, parse_assignment_file, parse_alignment_file
from signalalign.utils.sequenceTools import get_motif_kmers, find_modification_index_and_character
import tempfile
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from sklearn.datasets.samples_generator import make_blobs
def parse_args():
parser = ArgumentParser(description=__doc__)
# required arguments
parser.add_argument('--config', '-c', required=True, action='store',
dest='config', type=str, default=None,
help="Path to config file")
args = parser.parse_args()
return args
def get_nanopore_gauss_mixture(event_means, n_models):
"""Create a mixture model from an array of event means and fit some number of models to the data
:param event_means: array of event means
:param n_models: number of gaussians to fit
"""
model = GaussianMixture(n_models).fit(event_means)
assert model.converged_, "Model has not converged"
return model
def find_best_1d_gaussian_fit(x, max_n, aic=True):
"""
:param x: input data
:param max_n: max number of gaussians to try to fit to data
:param aic: boolean option to use aic or bic. if false use bic as selection criterion
:return:
"""
N = np.arange(1, max_n)
models = [None for i in range(len(N))]
for i in range(len(N)):
models[i] = GaussianMixture(N[i]).fit(x)
# use AIC or BIC for model selection
if aic:
aic = [m.aic(x) for m in models]
m_best = models[np.argmin(aic)]
else:
bic = [m.bic(x) for m in models]
m_best = models[np.argmin(bic)]
return m_best
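
# Illustrative sketch (added): generate fake bimodal event means, pick the best mixture by
# AIC and read off its component parameters. The means/scales below are arbitrary.
def _example_best_1d_gaussian_fit():
    x = np.concatenate([np.random.normal(80, 2, 500),
                        np.random.normal(90, 2, 500)]).reshape(-1, 1)
    best_model = find_best_1d_gaussian_fit(x, max_n=5)
    return get_mus_and_sigmas_1d(best_model)  # list of (mu, sigma) pairs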
def get_mus_and_sigmas_1d(gaussian_model):
"""Get the mean and stdv of each normal curve given a GaussianMixture model
:param gaussian_model: an already converged GaussianMixture model
:return: list of tuples with tup[0] = mu and tup[1] = sigma
"""
assert gaussian_model.converged_, "Model has not converged"
normals = []
for i, mu in enumerate(gaussian_model.means_):
assert len(gaussian_model.covariances_[i]) == 1, "This function only works for 1D gaussian mixture models"
sigma = np.sqrt(gaussian_model.covariances_[i][0])
# sigma = sigma / gaussian_model.weights_[i]
normals.append((mu, sigma))
return normals
def closest_to_canonical(mixture_normals, canonical_mu):
"""Find the normal distribution closet to canonical mu"""
min_index = 0
min_distance = 1000
for i in range(len(mixture_normals)):
mu = mixture_normals[i][0]
distance = abs(mu - canonical_mu)
if distance < min_distance:
min_index = i
min_distance = distance
match = mixture_normals.pop(min_index)
return match, mixture_normals, min_distance
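
# Small sketch (added; component values are made up): select the mixture component whose
# mean is nearest to a canonical kmer level mean of 90 pA.
def _example_closest_to_canonical():
    components = [(np.array([83.0]), np.array([1.5])),
                  (np.array([90.5]), np.array([1.2]))]
    match, remaining, distance = closest_to_canonical(components, canonical_mu=90.0)
    return match, remaining, distance  # match is the (mu=90.5, sigma=1.2) component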
def fit_model_to_kmer_dist(all_assignments, kmer, n_normals=2):
"""Return a mixture model from the distribution of event means for a given kmer
    :param all_assignments: master table of assignments (must have the fields "kmer" and "level_mean")
    :param kmer: str that must be in the assignments table
    :param n_normals: number of normal gaussians to fit to the distribution
"""
samples = all_assignments[all_assignments["kmer"] == kmer]["level_mean"].values.reshape(-1, 1)
model = False
if len(samples) == 0:
print("No alignments found for kmer: {}".format(kmer))
else:
model = get_nanopore_gauss_mixture(samples, n_normals)
return model
def generate_gaussian_mixture_model_for_motifs(model_h, assignments, all_kmer_pars, strand,
output_dir, plot=False, name="", target_model=None, show=False):
"""Generate new hmm model using mixture model of assignment data for each required kmer given the set of motifs
:param model_h: HmmModel
:param strand: 't' for template or 'c' for complement
:param plot: plot model data
:param assignments: assignment DataFrame with "strand", "kmer" and "level_mean"
:param all_kmer_pars: list of list of [canonical, modified] kmers
:param output_dir: path to save figures, models and log file
:param name: optional argument for naming the mixture model
:param target_model: use for plotting expected distribution for modified kmer
"""
assert strand in ('t', 'c'), "Strand must be either 'c' or 't'. strand = {}".format(strand)
assignments = assignments[assignments["strand"] == strand]
canonical_mixture_components_comparison = []
    if name != "":
name += "_"
output_model_path = os.path.join(output_dir, "{}_{}mixture_model.hmm".format(strand, name))
for kmer_pair in all_kmer_pars:
old_kmer = kmer_pair[0]
new_kmer = kmer_pair[1]
# fit
mixture_model = fit_model_to_kmer_dist(assignments, old_kmer, n_normals=2)
if mixture_model:
mixture_normals = get_mus_and_sigmas_1d(mixture_model)
kmer_mean, kmer_sd = model_h.get_event_mean_gaussian_parameters(old_kmer)
match, other, distance = closest_to_canonical(mixture_normals, kmer_mean)
# set parameters
model_h.set_kmer_event_mean(new_kmer, other[0][0][0])
model_h.set_kmer_event_sd(new_kmer, other[0][1][0])
canonical_mixture_components_comparison.append(
[old_kmer, kmer_mean, kmer_sd, match[0][0], match[1][0], other[0][0][0],
other[0][1][0], distance[0], strand])
print(old_kmer, mixture_normals)
if plot:
# model_h.plot_kmer_distributions([old_kmer, new_kmer],
# alignment_file_data=assignments,
# savefig_dir=output_dir,
# name=strand)
plot_output_dir = output_dir
if show:
plot_output_dir = None
plot_mixture_model_distribution(old_kmer, new_kmer, kmer_mean, kmer_sd, match[0][0], match[1][0],
other[0][0][0], other[0][1][0], strand, mixture_model=mixture_model,
kmer_assignments=assignments,
save_fig_dir=plot_output_dir,
target_model=target_model)
model_h.normalize(False, False)
model_h.write(output_model_path)
data = pd.DataFrame(canonical_mixture_components_comparison, columns=["kmer",
"canonical_model_mean",
"canonical_model_sd",
"canonical_mixture_mean",
"canonical_mixture_sd",
"modified_mixture_mean",
"modified_mixture_sd",
"distance",
"strand"])
data.sort_values("distance", inplace=True, ascending=False)
log_file = os.path.join(output_dir, "{}_distances.tsv".format(strand))
data.to_csv(log_file, sep="\t", index=False)
return data
def get_motif_kmer_pairs(motif_pair, k, alphabet="ATGC"):
"""Given a motif pair, create a list of all kmers which contain modification """
all_kmer_pars = []
motif_kmers = get_motif_kmers(motif_pair, k, alphabet=alphabet)
pos, old_char, new_char = find_modification_index_and_character(motif_pair[0], motif_pair[1])
for new_kmer in motif_kmers:
# get original kmer
pos = new_kmer.find(new_char)
old_kmer = new_kmer[0:pos] + old_char + new_kmer[pos + 1:]
all_kmer_pars.append([old_kmer, new_kmer])
return all_kmer_pars
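# Illustrative sketch (assumption: the second motif marks the modified base, e.g. 'E'
# replacing the canonical 'C' at one position):
# all_pairs = get_motif_kmer_pairs(["CCAGG", "CEAGG"], k=6)
# Each entry pairs a canonical k-mer with the same k-mer carrying the modified
# character, e.g. ["CCAGGT", "CEAGGT"] could appear among the returned pairs.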
def plot_mixture_model_distribution(canonical_kmer, modified_kmer, canonical_model_mean, canonical_model_sd,
canonical_mixture_mean,
canonical_mixture_sd, modified_mixture_mean, modified_mixture_sd,
strand, mixture_model=None, target_model=None,
kmer_assignments=None, save_fig_dir=None):
"""Plot normal distributions from mixture model and compare with original canonical model
:param canonical_model_mean: canonical_model_mean
:param canonical_model_sd: canonical_model_sd
:param canonical_mixture_mean: canonical_mixture_mean
:param canonical_mixture_sd: canonical_mixture_sd
:param modified_mixture_mean: modified_mixture_mean
:param modified_mixture_sd: modified_mixture_sd
:param save_fig_dir: optional path to save figure
:param strand: template or complement ('t' or 'c')
:param canonical_kmer: kmer to plot
:param modified_kmer: modified kmer
:param target_model: model to compare the mixture to
:param mixture_model: an already fit GaussianMixture model
:param kmer_assignments: assignments with ("level_mean" and "kmer") named columns of DataFrame
"""
fig = plt.figure(figsize=(12, 8))
panel1 = plt.axes([0.1, 0.1, .6, .8])
panel1.set_xlabel('pA')
panel1.set_ylabel('Density')
panel1.grid(color='black', linestyle='-', linewidth=1, alpha=0.5)
panel1.set_title("Mixture Model Comparison: {}".format(canonical_kmer))
# original canonical model
    x = np.linspace(canonical_model_mean - 4 * canonical_model_sd, canonical_model_mean + 4 * canonical_model_sd, 200)
from . import unittest, numpy, shapely20_deprecated
import pytest
from shapely.errors import ShapelyDeprecationWarning
from shapely.geos import lgeos
from shapely.geometry import LineString, asLineString, Point, LinearRing
class LineStringTestCase(unittest.TestCase):
def test_linestring(self):
# From coordinate tuples
line = LineString(((1.0, 2.0), (3.0, 4.0)))
self.assertEqual(len(line.coords), 2)
self.assertEqual(line.coords[:], [(1.0, 2.0), (3.0, 4.0)])
# From Points
line2 = LineString((Point(1.0, 2.0), Point(3.0, 4.0)))
self.assertEqual(len(line2.coords), 2)
self.assertEqual(line2.coords[:], [(1.0, 2.0), (3.0, 4.0)])
# From mix of tuples and Points
line3 = LineString((Point(1.0, 2.0), (2.0, 3.0), Point(3.0, 4.0)))
self.assertEqual(len(line3.coords), 3)
self.assertEqual(line3.coords[:], [(1.0, 2.0), (2.0, 3.0), (3.0, 4.0)])
# Bounds
self.assertEqual(line.bounds, (1.0, 2.0, 3.0, 4.0))
# Coordinate access
self.assertEqual(tuple(line.coords), ((1.0, 2.0), (3.0, 4.0)))
self.assertEqual(line.coords[0], (1.0, 2.0))
self.assertEqual(line.coords[1], (3.0, 4.0))
with self.assertRaises(IndexError):
line.coords[2] # index out of range
# Geo interface
self.assertEqual(line.__geo_interface__,
{'type': 'LineString',
'coordinates': ((1.0, 2.0), (3.0, 4.0))})
@shapely20_deprecated
def test_linestring_mutate(self):
line = LineString(((1.0, 2.0), (3.0, 4.0)))
# Coordinate modification
line.coords = ((-1.0, -1.0), (1.0, 1.0))
self.assertEqual(line.__geo_interface__,
{'type': 'LineString',
'coordinates': ((-1.0, -1.0), (1.0, 1.0))})
@shapely20_deprecated
def test_linestring_adapter(self):
# Adapt a coordinate list to a line string
coords = [[5.0, 6.0], [7.0, 8.0]]
la = asLineString(coords)
self.assertEqual(la.coords[:], [(5.0, 6.0), (7.0, 8.0)])
def test_linestring_empty(self):
# Test Non-operability of Null geometry
l_null = LineString()
self.assertEqual(l_null.wkt, 'GEOMETRYCOLLECTION EMPTY')
self.assertEqual(l_null.length, 0.0)
@shapely20_deprecated
def test_linestring_empty_mutate(self):
# Check that we can set coordinates of a null geometry
l_null = LineString()
l_null.coords = [(0, 0), (1, 1)]
self.assertAlmostEqual(l_null.length, 1.4142135623730951)
def test_equals_argument_order(self):
"""
Test equals predicate functions correctly regardless of the order
of the inputs. See issue #317.
"""
coords = ((0, 0), (1, 0), (1, 1), (0, 0))
ls = LineString(coords)
lr = LinearRing(coords)
self.assertFalse(ls.__eq__(lr)) # previously incorrectly returned True
self.assertFalse(lr.__eq__(ls))
self.assertFalse(ls == lr)
self.assertFalse(lr == ls)
ls_clone = LineString(coords)
lr_clone = LinearRing(coords)
self.assertTrue(ls.__eq__(ls_clone))
self.assertTrue(lr.__eq__(lr_clone))
self.assertTrue(ls == ls_clone)
self.assertTrue(lr == lr_clone)
def test_from_linestring(self):
line = LineString(((1.0, 2.0), (3.0, 4.0)))
copy = LineString(line)
self.assertEqual(len(copy.coords), 2)
self.assertEqual(copy.coords[:], [(1.0, 2.0), (3.0, 4.0)])
self.assertEqual('LineString',
lgeos.GEOSGeomType(copy._geom).decode('ascii'))
def test_from_linestring_z(self):
coords = [(1.0, 2.0, 3.0), (4.0, 5.0, 6.0)]
line = LineString(coords)
copy = LineString(line)
self.assertEqual(len(copy.coords), 2)
self.assertEqual(copy.coords[:], coords)
self.assertEqual('LineString',
lgeos.GEOSGeomType(copy._geom).decode('ascii'))
def test_from_linearring(self):
coords = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 0.0)]
ring = LinearRing(coords)
copy = LineString(ring)
self.assertEqual(len(copy.coords), 4)
self.assertEqual(copy.coords[:], coords)
self.assertEqual('LineString',
lgeos.GEOSGeomType(copy._geom).decode('ascii'))
def test_from_single_coordinate(self):
"""Test for issue #486"""
coords = [[-122.185933073564, 37.3629353839073]]
with pytest.raises(ValueError):
ls = LineString(coords)
ls.geom_type # caused segfault before fix
@unittest.skipIf(not numpy, 'Numpy required')
def test_numpy(self):
from numpy import array, asarray
from numpy.testing import assert_array_equal
# Construct from a numpy array
line = LineString(array([[0.0, 0.0], [1.0, 2.0]]))
self.assertEqual(len(line.coords), 2)
self.assertEqual(line.coords[:], [(0.0, 0.0), (1.0, 2.0)])
line = LineString(((1.0, 2.0), (3.0, 4.0)))
la = asarray(line)
expected = array([[1.0, 2.0], [3.0, 4.0]])
assert_array_equal(la, expected)
# Coordinate sequences can be adapted as well
la = asarray(line.coords)
        assert_array_equal(la, expected)
import pyximport
pyximport.install()
from collections import OrderedDict
import importlib
import logging
import math
import os
from os.path import expanduser, join
import pickle as pkl
from PIL import Image, ImageDraw
from matplotlib import cm
import matplotlib.colors as colors
from matplotlib.offsetbox import AnnotationBbox, OffsetImage
from matplotlib.patches import Patch
import matplotlib.pyplot as plt
import numpy as np
from numpy.linalg import norm
from sklearn.cluster import AgglomerativeClustering, KMeans, SpectralClustering
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE, Isomap
from tqdm import tqdm
from configuration import CONFIG, datasets
from src.MetaSeg.functions.in_out import components_load, probs_gt_load
from src.MetaSeg.functions.metrics import entropy
from src.MetaSeg.functions.utils import estimate_kernel_density
# noinspection DuplicatedCode
class Discovery(object):
def __init__(
self,
embeddings_file="embeddings.p",
distance_metric="euclid",
method="TSNE",
embedding_size=2,
overwrite_embeddings=False,
n_jobs=10,
dpi=300,
main_plot_args={},
tsne_args={},
save_dir=join(CONFIG.metaseg_io_path, "vis_embeddings"),
):
"""Loads the embedding files, computes the dimensionality reductions and calls
        the initialization of the main plot.
Args:
embeddings_file (str): Path to the file where all data of segments
including feature embeddings is saved.
distance_metric (str): Distance metric to use for nearest neighbor
computation.
method (str): Method to use for dimensionality reduction of nearest
neighbor embeddings. For plotting the points are always reduced in
dimensionality using PCA to 50 dimensions followed by t-SNE to two
dimensions.
embedding_size (int): Dimensionality of the feature embeddings used for
nearest neighbor search.
overwrite_embeddings (bool): If True, precomputed nearest neighbor and
plotting embeddings from previous runs are overwritten with freshly
computed ones. Otherwise precomputed embeddings are used if requested
embedding_size is matching.
n_jobs (int): Number of processes to use for t-SNE computation.
dpi (int): Dots per inch for graphics that are saved to disk.
main_plot_args (dict): Keyword arguments for the creation of the main plot.
tsne_args (dict): Keyword arguments for the t-SNE algorithm.
save_dir (str): Path to the directory where saved images are placed in.
"""
self.log = logging.getLogger("Discovery")
self.embeddings_file = embeddings_file
self.distance_metrics = ["euclid", "cos"]
self.dm = (
0
if distance_metric not in self.distance_metrics
else self.distance_metrics.index(distance_metric)
)
self.dpi = dpi
self.save_dir = save_dir
os.makedirs(self.save_dir, exist_ok=True)
self.cluster_methods = OrderedDict()
self.cluster_methods["kmeans"] = {"main": KMeans, "kwargs": {}}
self.cluster_methods["spectral"] = {"main": SpectralClustering, "kwargs": {}}
self.cluster_methods["agglo"] = {
"main": AgglomerativeClustering,
"kwargs": {"linkage": "ward"},
}
self.methods_with_ncluster_param = ["kmeans", "spectral", "agglo"]
self.cme = 0
self.clustering = None
self.n_clusters = 25
# colors:
self.standard_color = (0, 0, 1, 1)
self.current_color = (1, 0, 0, 1)
self.nn_color = (1, 0, 1, 1)
self.log.info("Loading data...")
with open(self.embeddings_file, "rb") as f:
self.data = pkl.load(f)
self.iou_preds = self.data["iou_pred"]
self.gt = np.array(self.data["gt"]).flatten()
self.pred = np.array(self.data["pred"]).flatten()
self.gi = self.data[
"image_level_index"
] # global indices (on image level and not on component level)
self.log.info("Loaded {} segment embeddings.".format(self.pred.shape[0]))
self.nearest_neighbors = None
if len(self.data["embeddings"]) == 1:
self.data["plot_embeddings"] = np.array(
[self.data["embeddings"][0][0], self.data["embeddings"][0][1]]
).reshape((1, 2))
self.data["nn_embeddings"] = self.data["plot_embeddings"]
else:
if (
"nn_embeddings" not in self.data.keys()
or overwrite_embeddings
or "plot_embeddings" not in self.data.keys()
) and embedding_size < self.data["embeddings"][0].shape[0]:
self.log.info("Computing PCA...")
n_comp = (
50
if 50
< min(
len(self.data["embeddings"]),
self.data["embeddings"][0].shape[0],
)
else min(
len(self.data["embeddings"]),
self.data["embeddings"][0].shape[0],
)
)
embeddings = PCA(n_components=n_comp).fit_transform(
np.stack(self.data["embeddings"]).reshape(
(-1, self.data["embeddings"][0].shape[0])
)
)
rewrite = True
else:
rewrite = False
if "plot_embeddings" not in self.data.keys() or overwrite_embeddings:
self.log.info("Computing t-SNE for plotting")
self.data["plot_embeddings"] = TSNE(
n_components=2, **tsne_args
).fit_transform(embeddings)
new_plot_embeddings = True
else:
new_plot_embeddings = False
if (
embedding_size >= self.data["embeddings"][0].shape[0]
or embedding_size is None
):
self.embeddings = np.stack(self.data["embeddings"]).reshape(
(-1, self.data["embeddings"][0].shape[0])
)
self.log.debug(
(
"Requested embedding size of {} was greater or equal "
"to data dimensionality of {}. "
"Data has thus not been reduced in dimensionality."
                    ).format(embedding_size, self.data["embeddings"][0].shape[0])
)
elif (
self.data["nn_embeddings"].shape[1] == embedding_size
if "nn_embeddings" in self.data.keys()
else False
) and not overwrite_embeddings:
self.embeddings = self.data["nn_embeddings"]
self.log.info(
(
"Loaded reduced embeddings "
"({} dimensions) from precomputed file "
+ "for nearest neighbor search."
).format(self.embeddings.shape[1])
)
elif rewrite:
if method == "TSNE":
if (
"plot_embeddings" in self.data.keys()
and embedding_size == 2
and new_plot_embeddings
):
self.embeddings = self.data["plot_embeddings"]
self.log.info(
(
"Reused the precomputed manifold for plotting for "
"nearest neighbor search."
)
)
else:
self.log.info(
(
"Computing t-SNE of dimension "
"{} for nearest neighbor search..."
).format(embedding_size)
)
self.embeddings = TSNE(
n_components=embedding_size, n_jobs=n_jobs, **tsne_args
).fit_transform(embeddings)
else:
self.log.info(
(
"Computing Isomap of dimension "
"{} for nearest neighbor search..."
).format(embedding_size)
)
self.embeddings = Isomap(
n_components=embedding_size,
n_jobs=n_jobs,
).fit_transform(embeddings)
self.data["nn_embeddings"] = self.embeddings
else:
raise ValueError(
(
"Please specify a valid combination of arguments.\n"
"Loading fails if 'overwrite_embeddings' is False and "
"saved 'embedding_size' does not match the requested one."
)
)
# Write added data into pickle file
if rewrite:
with open(self.embeddings_file, "wb") as f:
pkl.dump(self.data, f)
self.x = self.data["plot_embeddings"][:, 0]
self.y = self.data["plot_embeddings"][:, 1]
self.label_mapping = dict()
for d in np.unique(self.data["dataset"]).flatten():
try:
self.label_mapping[d] = getattr(
importlib.import_module(datasets[d].module_name),
datasets[d].class_name,
)(
**datasets[d].kwargs,
).label_mapping
except AttributeError:
self.label_mapping[d] = None
train_dat = self.label_mapping[CONFIG.TRAIN_DATASET.name] = getattr(
importlib.import_module(CONFIG.TRAIN_DATASET.module_name),
CONFIG.TRAIN_DATASET.class_name,
)(
**CONFIG.TRAIN_DATASET.kwargs,
)
self.pred_mapping = train_dat.pred_mapping
if CONFIG.TRAIN_DATASET.name not in self.label_mapping:
self.label_mapping[CONFIG.TRAIN_DATASET.name] = train_dat.label_mapping
self.tnsize = (50, 50)
self.fig_nn = None
self.fig_main = None
self.line_main = None
self.im = None
self.xybox = None
self.ab = None
self.basecolors = np.stack(
[self.standard_color for _ in range(self.x.shape[0])]
)
self.n_neighbors = 49
self.current_pressed_key = None
self.plot_main(**main_plot_args)
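    # Illustrative usage sketch (assumption: an embeddings pickle produced by the
    # MetaSeg pipeline is available at the given path):
    #   disc = Discovery(embeddings_file="embeddings.p", method="TSNE",
    #                    embedding_size=2, n_jobs=4)
    # Constructing the object loads or computes the reduced embeddings and then
    # opens the interactive scatter plot via plot_main().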
def plot_main(self, **plot_args):
"""Initializes the main plot.
        Supported keyword arguments are 'legend' (bool) and 'save_path' (str or None).
"""
self.fig_main = plt.figure(num=1)
self.fig_main.canvas.set_window_title("Embedding space")
ax = self.fig_main.add_subplot(111)
ax.set_axis_off()
self.line_main = ax.scatter(
self.x, self.y, marker="o", color=self.basecolors, zorder=2
)
self.line_main.set_picker(True)
if (
(
plot_args["legend"]
and all(lm is not None for lm in self.label_mapping.values())
)
if "legend" in plot_args
else False
):
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width, box.height * 0.8])
legend_elements = []
for d in np.unique(self.data["dataset"]).flatten():
cls = np.unique(self.gt[np.array(self.data["dataset"])[self.gi] == d])
cls = list(
{
(self.label_mapping[d][cl][0], self.label_mapping[d][cl][1])
for cl in cls
}
)
names = np.array([i[0] for i in cls])
cols = np.array([i[1] for i in cls])
legend_elements += [
Patch(
color=tuple(i / 255.0 for i in cols[i]) + (1.0,),
label=names[i]
if not names[i][-1].isdigit()
else names[i][: names[i].rfind(" ")],
)
for i in range(names.shape[0])
]
ax.legend(
loc="upper left",
handles=legend_elements,
ncol=8,
bbox_to_anchor=(0, 1.2),
)
self.basecolors = self.line_main.get_facecolor()
tmp = (
Image.open(self.data["image_path"][self.gi[0]])
.convert("RGB")
.crop(self.data["box"][0])
)
tmp.thumbnail(self.tnsize, Image.ANTIALIAS)
self.im = OffsetImage(tmp, zoom=2)
self.xybox = (50.0, 50.0)
self.ab = AnnotationBbox(
self.im,
(0, 0),
xybox=self.xybox,
xycoords="data",
boxcoords="offset points",
pad=0.3,
arrowprops=dict(arrowstyle="->"),
)
ax.add_artist(self.ab)
self.ab.set_visible(False)
if plot_args["save_path"] is not None if "save_path" in plot_args else False:
plt.savefig(
expanduser(plot_args["save_path"]), dpi=300, bbox_inches="tight"
)
else:
self.fig_main.canvas.mpl_connect("motion_notify_event", self.hover_main)
self.fig_main.canvas.mpl_connect("button_press_event", self.click_main)
self.fig_main.canvas.mpl_connect("scroll_event", self.scroll)
self.fig_main.canvas.mpl_connect("key_press_event", self.key_press)
self.fig_main.canvas.mpl_connect("key_release_event", self.key_release)
plt.show()
def hover_main(self, event):
"""Action handler for the main plot.
This function shows a thumbnail of the underlying image when a scatter point
is hovered with the mouse.
"""
# if the mouse is over the scatter points
if self.line_main.contains(event)[0]:
# find out the index within the array from the event
ind, *_ = self.line_main.contains(event)[1]["ind"]
# get the figure size
w, h = self.fig_main.get_size_inches() * self.fig_main.dpi
ws = (event.x > w / 2.0) * -1 + (event.x <= w / 2.0)
hs = (event.y > h / 2.0) * -1 + (event.y <= h / 2.0)
# if event occurs in the top or right quadrant of the figure,
# change the annotation box position relative to mouse.
self.ab.xybox = (self.xybox[0] * ws, self.xybox[1] * hs)
# make annotation box visible
self.ab.set_visible(True)
# place it at the position of the hovered scatter point
self.ab.xy = (self.x[ind], self.y[ind])
# set the image corresponding to that point
tmp = (
Image.open(self.data["image_path"][self.gi[ind]])
.convert("RGB")
.crop(self.data["box"][ind])
)
tmp.thumbnail(self.tnsize, Image.ANTIALIAS)
self.im.set_data(tmp)
tmp.close()
else:
# if the mouse is not over a scatter point
self.ab.set_visible(False)
self.fig_main.canvas.draw_idle()
def click_main(self, event):
"""Action handler for the main plot.
This function shows a single or full image or displays nearest neighbors based
on the button that has been pressed and which scatter point was pressed.
"""
if self.line_main.contains(event)[0]:
ind, *_ = self.line_main.contains(event)[1]["ind"]
if self.current_pressed_key == "t" and event.button == 1:
self.store_thumbnail(ind)
elif self.current_pressed_key == "control" and event.button == 1:
self.show_single_image(ind, save=True)
elif self.current_pressed_key == "control" and event.button == 2:
self.show_full_image(ind, save=True)
elif event.button == 1: # left mouse button
self.show_single_image(ind)
elif event.button == 2: # middle mouse button
self.show_full_image(ind)
elif event.button == 3: # right mouse button
if not plt.fignum_exists(2):
# nearest neighbor figure is not open anymore or has not been
# opened yet
self.log.info("Loading nearest neighbors...")
self.nearest_neighbors = self.get_nearest_neighbors(
ind, metric=self.distance_metrics[self.dm]
)
thumbnails = []
for neighbor_ind in self.nearest_neighbors:
thumbnails.append(
Image.open(
self.data["image_path"][self.gi[neighbor_ind]]
).crop(self.data["box"][neighbor_ind])
)
columns = math.ceil(math.sqrt(self.n_neighbors))
rows = math.ceil(self.n_neighbors / columns)
self.fig_nn = plt.figure(num=2, dpi=self.dpi)
self.fig_nn.canvas.set_window_title(
"{} nearest neighbors to selected image".format(
self.n_neighbors
)
)
for p in range(columns * rows):
ax = self.fig_nn.add_subplot(rows, columns, p + 1)
ax.set_axis_off()
if p < len(thumbnails):
ax.imshow(np.asarray(thumbnails[p]))
self.fig_nn.canvas.mpl_connect("button_press_event", self.click_nn)
self.fig_nn.canvas.mpl_connect("key_press_event", self.key_press)
self.fig_nn.canvas.mpl_connect(
"key_release_event", self.key_release
)
self.fig_nn.canvas.mpl_connect("scroll_event", self.scroll)
self.fig_nn.show()
else:
# nearest neighbor figure is already open. Update the figure with
# new nearest neighbor
self.update_nearest_neighbors(ind)
return
self.set_color(ind, self.current_color)
self.flush_colors()
def click_nn(self, event):
"""Action handler for the nearest neighbor window.
When clicking a cropped segment in the nearest neighbor window the same actions
are taken as in the click handler for the main plot.
"""
if event.inaxes in self.fig_nn.axes:
ind = self.get_ind_nn(event)
if self.current_pressed_key == "t" and event.button == 1:
self.store_thumbnail(self.nearest_neighbors[ind])
elif self.current_pressed_key == "control" and event.button == 1:
self.show_single_image(self.nearest_neighbors[ind], save=True)
elif self.current_pressed_key == "control" and event.button == 2:
self.show_full_image(self.nearest_neighbors[ind], save=True)
elif event.button == 1: # left mouse button
self.show_single_image(self.nearest_neighbors[ind])
elif event.button == 2: # middle mouse button
self.show_full_image(self.nearest_neighbors[ind])
elif event.button == 3: # right mouse button
self.update_nearest_neighbors(self.nearest_neighbors[ind])
def key_press(self, event):
"""Performs different actions based on pressed keys."""
self.log.debug("Key '{}' pressed.".format(event.key))
if event.key == "m":
self.dm += 1
self.dm = self.dm % len(self.distance_metrics)
self.log.info(
"Changed distance metric to {}".format(self.distance_metrics[self.dm])
)
elif event.key == "#":
self.cme += 1
self.cme = self.cme % len(self.cluster_methods)
self.log.info(
"Changed clustering method to {}".format(
list(self.cluster_methods.keys())[self.cme]
)
)
elif event.key == "c":
self.log.info(
"Started clustering with {}...".format(
list(self.cluster_methods.keys())[self.cme]
)
)
self.cluster(method=list(self.cluster_methods.keys())[self.cme])
if self.fig_main.axes[0].get_legend() is not None:
self.fig_main.axes[0].get_legend().remove()
self.basecolors = cm.get_cmap("viridis", (max(self.clustering) + 1))(
self.clustering
)
self.flush_colors()
elif event.key == "g":
self.color_gt()
elif event.key == "h":
self.color_pred()
elif event.key == "b":
self.set_color(list(range(self.basecolors.shape[0])), self.standard_color)
if self.fig_main.axes[0].get_legend() is not None:
self.fig_main.axes[0].get_legend().remove()
self.flush_colors()
elif event.key == "d":
self.show_density()
self.current_pressed_key = event.key
def key_release(self, event):
"""Clears the variable where the last pressed key is saved."""
self.current_pressed_key = None
self.log.debug("Key '{}' released.".format(event.key))
def scroll(self, event):
"""Increases or decreases number of nearest neighbors when scrolling on
the main or nearest neighbor plot."""
if event.button == "up":
self.n_neighbors += 1
self.log.info(
"Increased number of nearest neighbors to {}".format(self.n_neighbors)
)
elif event.button == "down":
if self.n_neighbors > 0:
self.n_neighbors -= 1
self.log.info(
"Decreased number of nearest neighbors to {}".format(
self.n_neighbors
)
)
def show_single_image(self, ind, save=False):
"""Displays the full image belonging to a segment. The segment is marked with
a red bounding box."""
self.log.info("{} image...".format("Saving" if save else "Loading"))
img_box = self.draw_box_on_image(ind)
fig_tmp = plt.figure(max(3, max(plt.get_fignums()) + 1), dpi=self.dpi)
ax = fig_tmp.add_subplot(111)
ax.set_axis_off()
ax.imshow(np.asarray(img_box), interpolation="nearest")
if save:
fig_tmp.subplots_adjust(
bottom=0, left=0, right=1, top=1, hspace=0, wspace=0
)
ax.margins(0.05, 0.05)
fig_tmp.gca().xaxis.set_major_locator(plt.NullLocator())
fig_tmp.gca().yaxis.set_major_locator(plt.NullLocator())
fig_tmp.savefig(
join(self.save_dir, "image_{}.jpg".format(ind)),
bbox_inches="tight",
pad_inches=0.0,
)
self.log.debug(
"Saved image to {}".format(
join(self.save_dir, "image_{}.jpg".format(ind))
)
)
else:
fig_tmp.canvas.set_window_title(
"Dataset: {}, Image index: {}".format(
self.data["dataset"][self.gi[ind]],
self.data["image_index"][self.gi[ind]],
)
)
fig_tmp.tight_layout(pad=0.0)
fig_tmp.show()
def show_full_image(self, ind, save=False):
"""Displays four panels of the full image belonging to a segment.
Top left: Entropy heatmap of prediction.
Top right: Predicted IoU of each segment.
Bottom left: Source image with ground truth overlay.
Bottom right: Predicted semantic segmentation.
"""
self.log.info("{} detailed image...".format("Saving" if save else "Loading"))
box = self.data["box"][ind]
image = np.asarray(
Image.open(self.data["image_path"][self.gi[ind]]).convert("RGB")
)
image_index = self.data["image_index"][self.gi[ind]]
iou_pred = self.data["iou_pred"][self.gi[ind]]
dataset = self.data["dataset"][self.gi[ind]]
model_name = self.data["model_name"][self.gi[ind]]
pred, gt, image_path = probs_gt_load(
image_index,
input_dir=join(CONFIG.metaseg_io_path, "input", model_name, dataset),
)
components = components_load(
image_index,
components_dir=join(
CONFIG.metaseg_io_path, "components", model_name, dataset
),
)
e = entropy(pred)
pred = pred.argmax(2)
predc = np.asarray(
[
self.pred_mapping[pred[ind_i, ind_j]][1]
for ind_i in range(pred.shape[0])
for ind_j in range(pred.shape[1])
]
).reshape(image.shape)
overlay_factor = [1.0, 0.5, 1.0]
if self.label_mapping[dataset] is not None:
gtc = np.asarray(
[
self.label_mapping[dataset][gt[ind_i, ind_j]][1]
for ind_i in range(gt.shape[0])
for ind_j in range(gt.shape[1])
]
).reshape(image.shape)
else:
gtc = np.zeros_like(image)
overlay_factor[1] = 0.0
img_predc, img_gtc, img_entropy = [
Image.fromarray(
                np.uint8(arr * overlay_factor[i] + image * (1 - overlay_factor[i]))
import pandas as pd
import numpy as np
import scipy.stats
# AUC comparison adapted from
# https://github.com/Netflix/vmaf/
def compute_midrank(x):
"""Computes midranks.
Args:
x - a 1D numpy array
Returns:
array of midranks
"""
J = np.argsort(x)
Z = x[J]
N = len(x)
    T = np.zeros(N, dtype=float)
i = 0
while i < N:
j = i
while j < N and Z[j] == Z[i]:
j += 1
T[i:j] = 0.5*(i + j - 1)
i = j
    T2 = np.empty(N, dtype=float)
# Note(kazeevn) +1 is due to Python using 0-based indexing
# instead of 1-based in the AUC formula in the paper
T2[J] = T + 1
return T2
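# Worked example: for x = [0.2, 0.8, 0.4, 0.8] the tied values 0.8 share the
# midrank (3 + 4) / 2 = 3.5, so compute_midrank(np.array([0.2, 0.8, 0.4, 0.8]))
# returns [1.0, 3.5, 2.0, 3.5].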
def fastDeLong(predictions_sorted_transposed, label_1_count):
"""
The fast version of DeLong's method for computing the covariance of
unadjusted AUC.
Args:
predictions_sorted_transposed: a 2D numpy.array[n_classifiers, n_examples]
sorted such as the examples with label "1" are first
Returns:
(AUC value, DeLong covariance)
Reference:
@article{sun2014fast,
title={Fast Implementation of DeLong's Algorithm for
Comparing the Areas Under Correlated Receiver Operating Characteristic Curves},
author={<NAME> and <NAME>},
journal={IEEE Signal Processing Letters},
volume={21},
number={11},
pages={1389--1393},
year={2014},
publisher={IEEE}
}
"""
# Short variables are named as they are in the paper
m = label_1_count
n = predictions_sorted_transposed.shape[1] - m
positive_examples = predictions_sorted_transposed[:, :m]
negative_examples = predictions_sorted_transposed[:, m:]
k = predictions_sorted_transposed.shape[0]
    tx = np.empty([k, m], dtype=float)
    ty = np.empty([k, n], dtype=float)
#!/usr/bin/env python3
import logging
#import click
import numpy as np
import argparse
import pickle
from os.path import abspath, dirname, join
from gym.spaces import Tuple
from mujoco_py import const, MjViewer
from mae_envs.viewer.env_viewer import EnvViewer
from mae_envs.wrappers.multi_agent import JoinMultiAgentActions
from mujoco_worldgen.util.envs import examine_env, load_env
from mujoco_worldgen.util.types import extract_matching_arguments
from mujoco_worldgen.util.parse_arguments import parse_arguments
from runpy import run_path
from mae_envs.modules.util import (uniform_placement, center_placement,
uniform_placement_middle)
from gym.spaces import Box, MultiDiscrete, Discrete
# from simphas.MRL import mpolicy
# import gym
# from RL_brain_2 import PolicyGradient
from RL_brain_3 import PolicyGradientAgent
# import matplotlib.pyplot as plt
parser = argparse.ArgumentParser()
parser.add_argument('--learning_rate', type=float, default=1e-3, help='learning rate')
parser.add_argument('--GAMMA', type=float, default=0.999,)
parser.add_argument('--episode', type=int, default=350)
parser.add_argument('--n_episode', type=int, default=50000)
parser.add_argument('--opt', default='SGLD')
parser.add_argument('--n_hiders', type=int, default=1)
parser.add_argument('--n_seekers', type=int, default=1)
parser.add_argument('--n_agents', type=int, default=2)
parser.add_argument('--seeds', type=int, default=1)
parser.add_argument('--out', default='output')
parser.add_argument('--s_speed', type=int, default=1)
parser.add_argument('--h_speed', type=int, default=1)
parser.add_argument('--fileseeker', default='policy.pkl')
parser.add_argument('--filehider', default='policy.pkl')
parser.add_argument('--outflag',type=int, default=0)
parser.add_argument('--vlag',type=int, default=0)
args = parser.parse_args()
def edge_punish(x, y, l=0.2, p=3.53, w=0):
xx = 0.0
if (np.abs(x - 0) < l) | (np.abs(x - p) < l):
xx = xx + 1.0
elif (np.abs(y - 0) < l) | (np.abs(y - p) < l):
xx = xx + 1.0
return w * xx * 1.0
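# Example (with an explicit weight, since the default w=0 disables the penalty):
# edge_punish(0.1, 2.0, l=0.2, p=3.53, w=1.0) returns 1.0 because x lies within l
# of the x=0 wall; a point away from all four walls returns 0.0.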
'''multiple setting
def matdis(n, obs_x):
dism = np.zeros((n, n))
for i in range(n):
for j in range(n):
if i != j:
dism[i, j] = np.sqrt(np.sum((obs_x[i, :2] - obs_x[j, :2]) ** 2))
return dism
def matmas(n, mas):
matm = np.empty([n, n], dtype=bool)
for i in range(n):
for j in range(n):
if i > j:
matm[i, j] = mas[i, j]
elif i < j:
matm[i, j] = mas[i, j - 1]
else:
matm[i, j] = False
return matm
def game_rew(n,n_seekers, dism, matm, thr=1.0):
return np.sum( (np.sum ( ((dism < np.ones((n,n))*thr) & (matm))[-n_seekers:], axis=0)>0))
'''
env_name = 'mae_envs/envs/mybase.py'
display = False
n_agents= args.n_agents
n_seekers=args.n_seekers
n_hiders=args.n_hiders
episode=args.episode
n_episode=args.n_episode
kwargs = {}
seed=args.seeds
opt=args.opt
lr=args.learning_rate
out=args.out
outflag=args.outflag
s_speed=args.s_speed
h_speed=args.h_speed
sfile=args.fileseeker
hfile=args.filehider
vlag=args.vlag
GAMMA=args.GAMMA
kwargs.update({
'n_agents': n_agents,
'n_seekers': n_seekers,
'n_hiders': n_hiders,
'n_boxes':0,
'cone_angle': 2 * np.pi,
#'n_substeps' : 1
})
module = run_path(env_name)
make_env = module["make_env"]
args_to_pass, args_remaining = extract_matching_arguments(make_env, kwargs)
env = make_env(**args_to_pass)
env.reset()
if display:
env = EnvViewer(env)
env.env_reset()
print(out)
def main(sk=None,hd=None,vlag=0):
rhlist = []
rslist = []
Seeker=sk
Hider=hd
if Seeker == None:
Seeker = PolicyGradientAgent(lr, [8], n_actions=9, layer1_size=64, layer2_size=64,opt=opt,seed=seed,GAMMA=GAMMA)
if Hider == None:
Hider = PolicyGradientAgent(lr, [8], n_actions=9, layer1_size=64, layer2_size=64,opt=opt,seed=seed+12345, GAMMA=GAMMA)
for ii in range(n_episode):
if display:
env.env_reset()
else:
env.reset()
sampleaction = np.array([[5, 5, 5], [5, 5, 5]])
action = {'action_movement': sampleaction}
obs, rew, down, _ = env.step(action)
observation = np.array(
[obs['observation_self'][0][0], obs['observation_self'][0][1], obs['observation_self'][0][4],
obs['observation_self'][0][5], obs['observation_self'][1][0], obs['observation_self'][1][1],
obs['observation_self'][1][4], obs['observation_self'][1][5]])
for i in range(episode):
action_Seeker = Seeker.choose_action(observation)
action_Hider = Hider.choose_action(observation)
if np.random.rand() > 1:
                action_Hider = np.random.randint(9)
from __future__ import division
import numpy as np
from numpy.linalg import inv
from scipy import stats
from itertools import islice
def online_changepoint_detection(data, hazard_func, observation_likelihood):
maxes = np.zeros(len(data) + 1)
R = np.zeros((len(data) + 1, len(data) + 1))
R[0, 0] = 1
for t, x in enumerate(data):
# Evaluate the predictive distribution for the new datum under each of
# the parameters. This is the standard thing from Bayesian inference.
predprobs = observation_likelihood.pdf(x)
# Evaluate the hazard function for this interval
H = hazard_func(np.array(range(t+1)))
# Evaluate the growth probabilities - shift the probabilities down and to
# the right, scaled by the hazard function and the predictive
# probabilities.
R[1:t+2, t+1] = R[0:t+1, t] * predprobs * (1-H)
# Evaluate the probability that there *was* a changepoint and we're
# accumulating the mass back down at r = 0.
R[0, t+1] = np.sum( R[0:t+1, t] * predprobs * H)
# Renormalize the run length probabilities for improved numerical
# stability.
        R[:, t+1] = R[:, t+1] / np.sum(R[:, t+1])
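# Illustrative sketch (not part of the original snippet): a constant-hazard function
# of the form expected by online_changepoint_detection, where lam is the assumed
# average run length between changepoints.
def constant_hazard_example(r, lam=250.0):
    # r is an array of candidate run lengths; the hazard is uniform over them.
    return np.ones(r.shape) / lam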
import sys
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from get_data_brasil import run_crear_excel_brasil
from get_data_brasil_wcota import run_crear_excel_brasil_wcota
from get_data_pernambuco import run_crear_excel_recife
from get_data_ourworldindata import run_crear_excel_ourworldindata
from pandas import ExcelWriter
import colormap
import plotly.graph_objects as go
from PIL import Image
import base64
import os
matplotlib.use('tkagg')
def plotly_html(a_14_days, p_seven, dia, bra_title, save_path, filename_bg):
for i in range(len(p_seven)):
if p_seven[i] < 0.0:
p_seven[i] = 0.0
color_map = []
for i in range(len(a_14_days)):
if i < len(a_14_days) - 60:
color_map.append('rgba(0, 0, 0, 0.1)')
elif i == len(a_14_days) - 1:
color_map.append('rgba(255, 255, 255, 0.6)')
else:
color_map.append('Blue')
fig = go.Figure()
fig.add_trace(go.Scatter(x=a_14_days,
y=p_seven,
text=dia,
mode='lines+markers',
marker=dict(
color=color_map,
showscale=False,
size=10,
line=dict(
color='Black',
width=0.2)),
line=dict(
color="Black",
width=0.5,
dash="dot"),
))
fig.add_shape(type="line",
x0=0,
y0=1,
x1=max(a_14_days),
y1=1,
line=dict(
color="Black",
width=1,
dash="dot",
))
image_filename = filename_bg
img = base64.b64encode(open(image_filename, 'rb').read())
x = round(a_14_days.max())
y = round(p_seven.max())
print(x, y)
fig.add_layout_image(
dict(
source='data:image/png;base64,{}'.format(img.decode()),
xref="x",
yref="y",
x=0,
y=p_seven.max(),
sizex=a_14_days.max(),
sizey=p_seven.max(),
xanchor="left",
yanchor="top",
sizing="stretch",
opacity=0.95,
layer="below"))
fig.add_annotation(dict(font=dict(color='black', size=9),
xref="paper", yref="paper",
x=0.9, y=0.9,
text="EPG > 100: High", showarrow=False))
fig.add_shape(type="rect",
xref="paper", yref="paper",
x0=0.9, x1=0.91, y0=0.87, y1=0.89, fillcolor="Red", line_color="Red")
fig.add_annotation(dict(font=dict(color='black', size=9),
xref="paper", yref="paper",
x=0.9, y=0.86,
text=" 70 < EPG < 100: Moderate-high", showarrow=False))
fig.add_shape(type="rect",
xref="paper", yref="paper",
x0=0.9, x1=0.91, y0=0.86, y1=0.78, fillcolor="Yellow", line_color="Yellow")
fig.add_annotation(dict(font=dict(color='black', size=9),
xref="paper", yref="paper",
x=0.9, y=0.82,
text=" 30 < EPG < 70 : Moderate", showarrow=False))
fig.add_annotation(dict(font=dict(color='black', size=9),
xref="paper", yref="paper",
x=0.9, y=0.78,
text="EPG < 30: Low", showarrow=False))
fig.add_annotation(dict(font=dict(color='blue', size=9),
xref="paper", yref="paper",
x=0.9, y=0.728,
text="Last 60 days", showarrow=False))
fig.add_shape(type="rect",
xref="paper", yref="paper",
x0=0.9, x1=0.91, y0=0.77, y1=0.74, fillcolor="Green", line_color="Green")
fig.add_shape(type="rect",
xref="paper", yref="paper",
x0=0.9, x1=0.91, y0=0.725, y1=0.70, fillcolor="Blue", line_color="Blue")
fig.update_layout(plot_bgcolor='rgb(255,255,255)',
width=800,
height=600,
xaxis_showgrid=False,
yaxis_showgrid=False,
xaxis_title="Attack rate per 10⁵ inh. (last 14 days)",
yaxis_title="\u03C1 (mean of the last 7 days)",
title={
'text': bra_title,
'y': 0.9,
'x': 0.5,
'xanchor': 'center',
'yanchor': 'top'},
)
fig.update_xaxes(rangemode="tozero")
fig.update_yaxes(rangemode="tozero")
# fig.show()
os.remove(filename_bg)
fig.write_html(filename_bg+'.html', include_plotlyjs="cdn")
def run_risk_diagrams(argv_1, deaths, file_others_cases, file_others_pop, radio_valor, ourworldindata_country):
if argv_1:
last_days_time = 30
brasil = False
pt = False
html = False
last_days = False
animation = False
if radio_valor == 1:
last_days = True
elif radio_valor == 2:
html = True
else:
pass
dataTable = []
dataTable_EPG = []
if argv_1 == 'brasil' and deaths == 'False':
try:
run_crear_excel_brasil()
filename = 'data/Data_Brasil.xlsx'
filename_population = 'data/pop_Brasil_v3.xlsx'
sheet_name = 'Cases'
except AttributeError:
print('Error! Not found file or could not download!')
elif argv_1 == 'brasil_regions' and deaths == 'False':
try:
run_crear_excel_brasil()
filename = 'data/Data_Brasil.xlsx'
filename_population = 'data/pop_Brasil_Regions_v3.xlsx'
sheet_name = 'Regions'
except AttributeError:
print('Error! Not found file or could not download!')
elif argv_1 == 'recife':
try:
run_crear_excel_recife()
filename = 'data/cases-recife.xlsx'
filename_population = 'data/pop_recife_v1.xlsx'
sheet_name = 'Cases'
except AttributeError:
print('Error! Not found file or could not download!')
elif argv_1 == 'WCOTA':
try:
#run_crear_excel_brasil_wcota('AM')
#run_crear_excel_brasil_wcota('PB')
run_crear_excel_brasil_wcota('SP')
filename = 'data/cases-wcota.xlsx'
#filename_population = 'data/pop_AM_v1.xlsx'
filename_population = 'data/pop_SP_v1.xlsx'
#filename_population = 'data/pop_PB_v1.xlsx'
#filename = 'data/Dades.xlsx'
#filename_population = 'data/pop_Dades.xlsx'
sheet_name = 'Cases'
except AttributeError:
print('Error! Not found file or could not download!')
elif argv_1 == 'ourworldindata' and deaths == 'False':
try:
run_crear_excel_ourworldindata(ourworldindata_country)
filename = 'data/ourworldindata.xlsx'
filename_population = 'data/pop_ourworldindata_v1.xlsx'
sheet_name = 'Cases'
except AttributeError:
print('Error! Not found file or could not download!')
elif argv_1 == 'others' and deaths == 'False':
try:
filename = file_others_cases
filename_population = file_others_pop
sheet_name = 'Cases'
except AttributeError:
print('Error! Not found file or could not download!')
data = pd.read_excel(filename, sheet_name=sheet_name)
population = pd.read_excel(filename_population)
dia = pd.to_datetime(data['date']).dt.strftime('%d/%m/%Y')
dia = dia.to_numpy()
region = population.columns
for ID in range(len(region)):
cumulative_cases = data[region[ID]]
cumulative_cases = cumulative_cases.to_numpy()
            new_cases = np.zeros((len(cumulative_cases)), dtype=int)
for i in range(len(cumulative_cases)):
if i != 0:
new_cases[i] = cumulative_cases[i] - \
cumulative_cases[i - 1]
            p = np.zeros((len(new_cases)), dtype=float)
for i in range(7, len(new_cases)):
div = 0
aux = new_cases[i - 5] + new_cases[i - 6] + new_cases[i - 7]
if aux == 0:
div = 1
else:
div = aux
p[i] = min((new_cases[i] + new_cases[i - 1] +
new_cases[i - 2]) / div, 4)
            p_seven = np.zeros((len(new_cases)), dtype=float)
            n_14_days = np.zeros((len(new_cases)), dtype=float)
            a_14_days = np.zeros((len(new_cases)), dtype=float)
            risk = np.zeros((len(new_cases)), dtype=float)
            risk_per_10 = np.zeros((len(new_cases)), dtype=float)
day13 = 13
for i in range(day13, len(new_cases)):
                p_seven[i] = np.average(p[i - 6:i + 1])
"""Create Figures and Extract Results for CVPR paper.
Author: <NAME>
Email : <EMAIL>
"""
from __future__ import print_function
import os
from os.path import join, isdir, isfile
from collections import OrderedDict
import cPickle as pickle
import numpy as np
from scipy.misc import imsave
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import spiker
from spiker.models import utils
from spiker.data import ddd17
def compute_log_curve(log_name, num_runs=4):
"""Compute log curve, provide mean and standard deviation."""
log_collector = []
for run_idx in range(1, num_runs+1):
# prepare log path
log_file = join(spiker.SPIKER_EXPS+"-run-%d" % (run_idx),
log_name, "csv_history.log")
log_dict = utils.parse_csv_log(log_file)
log_collector.append(log_dict)
# compute for train loss
train_loss = np.vstack(
(log_collector[0]["loss"][np.newaxis, ...],
log_collector[1]["loss"][np.newaxis, ...],
log_collector[2]["loss"][np.newaxis, ...],
log_collector[3]["loss"][np.newaxis, ...]))
train_loss = train_loss.astype("float64")
train_loss_mean = np.mean(train_loss, axis=0)
train_loss_std = np.std(train_loss, axis=0)
# compute for test loss
test_loss = np.vstack(
(log_collector[0]["val_loss"][np.newaxis, ...],
log_collector[1]["val_loss"][np.newaxis, ...],
log_collector[2]["val_loss"][np.newaxis, ...],
log_collector[3]["val_loss"][np.newaxis, ...]))
test_loss = test_loss.astype("float64")
    test_loss_mean = np.mean(test_loss, axis=0)
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
plt.style.use('seaborn-whitegrid')
names = ['Age', 'Workclass', 'fnlwgt', 'Education', 'Education_Num', 'Marital_Status', 'Occupation', 'Relationship',
'Race', 'Sex', 'Capital_Gain', 'Capital_Loss', 'Hours_per_week', 'Country', 'income']
# 获取原始数据集
adult_data = pd.read_csv('../data/adult.csv', names=names)
print('原始数据集', adult_data.shape)
print(adult_data.head())
def laplace_mech(v, sensitivity, epsilon):
return v + np.random.laplace(loc=0, scale=sensitivity / epsilon)
def pct_error(orig, priv):
return np.abs(orig - priv) / orig * 100.0
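# Illustrative sketch: adding Laplace noise to a counting query with sensitivity 1
# (the query value 100 and epsilon 0.1 are made-up numbers for demonstration):
# noisy_count = laplace_mech(100, sensitivity=1, epsilon=0.1)
# print(pct_error(100, noisy_count))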
# AboveThreshold Algorithm
def above_threshold(df, queries, T, epsilon):
    T_hat = T + np.random.laplace(loc=0, scale=2 / epsilon)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This modules contains some testing of intermediary results between the PyTorch and the Theano framework.
"""
import numpy as np
from paths import DEBUG_DIR_MNIST_012, DEBUG_DIR_MNIST_rot, DEBUG_DIR_ETH80
from debug import *
######################################################################################################
# PROCEDURE #
######################################################################################################
# In order to correctly debug the PyTorch framework we load the exact same data, i.e. the first batch
# of the test data. We then save/print and compare every step of the forward pass of a rather simple
# model at the beginning, and we increase the complexity of the model as the tests are passed
# correctly. The last 't' in files named like xxx_t.xxx stands for Theano, 'p' stands for PyTorch.
######################################################################################################
# WITH ALL WEIGHTS SET TO 1 - MNIST_012 #
######################################################################################################
path = DEBUG_DIR_MNIST_012 + 'constant_weights/'
# input averaged with batch ##########################################################################
prepared_input_p = np.load(path + 'prepared_input_p.npy')
prepared_input_t = np.load(path + 'prepared_input_t.npy')
prepared_input_t = np.transpose(prepared_input_t, (0,2,1))
# plot_pytorch_theano_image(
# [prepared_input_p[0,:,0], prepared_input_t[0,:,0]],
# dir=path,
# name='prepared_input'
# )
# np.testing.assert_allclose(actual=prepared_input_p[0], desired=prepared_input_t[0], rtol=1e-7) # OK
######################################################################################################
##################################
# Spectral Convolutional Layer 1 #
##################################
# filter operator ####################################################################################
filter_operator1_p = np.load(path + 'filter_operator1_p.npy')
filter_operator1_t = np.load(path + 'filter_operator1_t.npy')
filter_operator1_t = np.transpose(filter_operator1_t, (2,1,0))
# plot_pytorch_theano_filter_operator(
# [filter_operator1_p[:,:,2], filter_operator1_t[:,:,2]],
# dir=path,
# name='filter_operator1'
# )
# np.testing.assert_allclose(actual=filter_operator1_p, desired=filter_operator1_t, rtol=1e-7) # 0.17%
######################################################################################################
# y ##################################################################################################
y1_p = np.load(path + 'y1_p.npy')
y1_t = np.load(path + 'y1_t.npy')
y1_t = np.transpose(y1_t, (0,3,2,1))
# plot_pytorch_theano_image(
# [y1_p[0,:,0], y1_t[0,:,0]],
# dir=path,
# name='y1'
# )
# np.testing.assert_allclose(actual=y1_p, desired=y1_t, rtol=1e-6) # 3.30%
######################################################################################################
# z ##################################################################################################
spectral_conv1_p = np.load(path + 'spectral_conv1_p.npy')
spectral_conv1_t = np.load(path + 'spectral_conv1_t.npy')
spectral_conv1_t = np.transpose(spectral_conv1_t, (0,2,1))
# plot_pytorch_theano_image(
# [spectral_conv1_p[0,:,0], spectral_conv1_t[0,:,0]],
# dir=path,
# name='spectral_conv1'
# )
# plot_pytorch_theano_image_diff(
# [spectral_conv1_p[0,:,0], spectral_conv1_t[0,:,0]],
# dir=path,
# name='spectral_conv1_diff'
# )
# np.testing.assert_allclose(actual=spectral_conv1_p, desired=spectral_conv1_t, rtol=1e-6) # 3.30%
######################################################################################################
##################################
# Dynamic Pooling 1 #
##################################
# mask ###############################################################################################
mask1_p = np.load(path + 'mask1_p.npy')
mask1_t = np.load(path + 'mask1_t.npy')
mask1_t = mask1_t[..., np.newaxis]
# plot_pytorch_theano_image(
# [mask1_p[0,:,0],
# mask1_t[0,:,0]],
# dir=path,
# name='mask1'
# )
# plot_pytorch_theano_image_diff(
# [mask1_p[0,:,0], mask1_t[0,:,0]],
# dir=path,
# name='mask1_diff'
# )
# np.testing.assert_allclose(actual=mask1_p, desired=mask1_t, rtol=1e-6) # 1.41%
######################################################################################################
##################################
# Spectral Convolutional Layer 2 #
##################################
# y ##################################################################################################
y2_p = np.load(path + 'y2_p.npy')
y2_t = np.load(path + 'y2_t.npy')
# y2_t = np.transpose(y2_t, (0,3,2,1))
# plot_pytorch_theano_image(
# [y2_p[0,:,0,0],
# y2_t[0,:,0,0]],
# dir=path,
# name='y2'
# )
# np.testing.assert_allclose(actual=y2_p, desired=y2_t, rtol=1e-6) # 9.68%
######################################################################################################
# z ##################################################################################################
spectral_conv2_p = np.load(path + 'spectral_conv2_p.npy')
spectral_conv2_t = np.load(path + 'spectral_conv2_t.npy')
spectral_conv2_t = np.transpose(spectral_conv2_t, (0,2,1))
# plot_pytorch_theano_image(
# [spectral_conv2_p[0,:,0],
# spectral_conv2_t[0,:,0]],
# dir=path,
# name='spectral_conv2'
# )
# plot_pytorch_theano_image_diff(
# [spectral_conv2_p[0,:,0], spectral_conv2_t[0,:,0]],
# dir=path,
# name='spectral_conv2_diff'
# )
# np.testing.assert_allclose(actual=spectral_conv2_p, desired=spectral_conv2_t, rtol=1e-6) # 9.78%
######################################################################################################
##################################
# Dynamic Pooling 2 #
##################################
# mask ###############################################################################################
mask2_p = np.load(path + 'mask2_p.npy')
mask2_t = np.load(path + 'mask2_t.npy')
mask2_t = mask2_t[..., np.newaxis]
# plot_pytorch_theano_image(
# [mask2_p[0,:,0],
# mask2_t[0,:,0]],
# dir=path,
# name='mask2'
# )
# plot_pytorch_theano_image_diff(
# [mask2_p[0,:,0], mask2_t[0,:,0]],
# dir=path,
# name='mask2_diff'
# )
# np.testing.assert_allclose(actual=mask2_p, desired=mask2_t, rtol=1e-6) # 1.55%
######################################################################################################
##################################
# Spectral Convolutional Layer 3 #
##################################
# y ##################################################################################################
y3_p = np.load(path + 'y3_p.npy')
y3_t = np.load(path + 'y3_t.npy')
# y3_t = np.transpose(y3_t, (0,3,2,1))
# plot_pytorch_theano_image(
# [y3_p[0,:,0,0],
# y3_t[0,:,0,0]],
# dir=path,
# name='y3'
# )
# np.testing.assert_allclose(actual=y3_p, desired=y3_t, rtol=1e-6) # 5.12%
######################################################################################################
# z ##################################################################################################
spectral_conv3_p = np.load(path + 'spectral_conv3_p.npy')
spectral_conv3_t = np.load(path + 'spectral_conv3_t.npy')
spectral_conv3_t = np.transpose(spectral_conv3_t, (0,2,1))
import numpy as np
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# This file contains a set of functions used to generate a uniformly #
# random symplectic matrix that is symplectic with respect with #
# L = direct_sum_{j=1}^n X #
# The only function you will likely need to use is symplectic(i, n) #
# DO NOT MODIFY (unless you have a deep understanding of the code) #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def test_gram_schmidt(symp_basis):
"""
Tests to make sure we have a symplectic basis
where symp_basis = (v_1, w_1, ..., v_n, w_n)
"""
n = symp_basis.shape[0] // 2
for j in range(n):
for k in range(n):
v_j = symp_basis[:, 2 * j]
v_k = symp_basis[:, 2 * k]
w_j = symp_basis[:, 2 * j + 1]
w_k = symp_basis[:, 2 * k + 1]
if (inner_prod(v_j, w_k) != (j == k)):
return False
if not(inner_prod(v_j, v_k) == inner_prod(w_j, w_k) == 0):
return False
return True
def get_lambda(n):
"""
Creates a 2n x 2n lambda matrix L
NOTE: This lambda matrix is NOT the one conventionally used for
symplectic matrices
"""
x = np.array([[0, 1], [1, 0]])
L = np.array([[0, 1], [1, 0]])
for i in range(n - 1):
L = direct_sum(L, x) % 2
return L
def direct_sum(a, b):
"""
Returns direct sum of matrices a and b
"""
m, n = a.shape
p, q = b.shape
top = np.hstack((a, np.zeros((m, q))))
bottom = np.hstack((np.zeros((p, n)), b))
return np.vstack((top, bottom))
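# Worked example: the direct sum of two 2x2 blocks is a 4x4 block-diagonal matrix.
# direct_sum(np.eye(2), np.array([[0, 1], [1, 0]])) gives
# [[1, 0, 0, 0],
#  [0, 1, 0, 0],
#  [0, 0, 0, 1],
#  [0, 0, 1, 0]]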
def numberofcosets(n):
"""
Returns the number of different cosets
"""
    x = np.power(2, 2 * n - 1) * (np.power(2, 2 * n) - 1)
    return x
import sys
import numpy as np
import pandas as pd
import openmdao.api as om
from wisdem.commonse import gravity
eps = 1e-3
# Convenience functions for computing McDonald's C and F parameters
def chsMshc(x):
return np.cosh(x) * np.sin(x) - np.sinh(x) * np.cos(x)
def chsPshc(x):
return np.cosh(x) * np.sin(x) + np.sinh(x) * np.cos(x)
def carterFactor(airGap, slotOpening, slotPitch):
"""Return Carter factor
(based on Langsdorff's empirical expression)
See page 3-13 Boldea Induction machines Chapter 3
"""
gma = (2 * slotOpening / airGap) ** 2 / (5 + 2 * slotOpening / airGap)
return slotPitch / (slotPitch - airGap * gma * 0.5)
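# Worked numeric example (representative, made-up dimensions in metres):
# carterFactor(airGap=0.001, slotOpening=0.003, slotPitch=0.02) is approximately 1.09,
# i.e. slotting widens the effective air gap by roughly 9 percent.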
# ---------------
def carterFactorMcDonald(airGap, h_m, slotOpening, slotPitch):
"""Return Carter factor using Carter's equation
(based on Schwartz-Christoffel's conformal mapping on simplified slot geometry)
This code is based on Eq. B.3-5 in Appendix B of McDonald's thesis.
It is used by PMSG_arms and PMSG_disc.
h_m : magnet height (m)
b_so : stator slot opening (m)
tau_s : Stator slot pitch (m)
"""
mu_r = 1.06 # relative permeability (probably for neodymium magnets, often given as 1.05 - GNS)
g_1 = airGap + h_m / mu_r # g
b_over_a = slotOpening / (2 * g_1)
gamma = 4 / np.pi * (b_over_a * np.arctan(b_over_a) - np.log(np.sqrt(1 + b_over_a ** 2)))
return slotPitch / (slotPitch - gamma * g_1)
# ---------------
def carterFactorEmpirical(airGap, slotOpening, slotPitch):
"""Return Carter factor using Langsdorff's empirical expression"""
sigma = (slotOpening / airGap) / (5 + slotOpening / airGap)
return slotPitch / (slotPitch - sigma * slotOpening)
# ---------------
def carterFactorSalientPole(airGap, slotWidth, slotPitch):
"""Return Carter factor for salient pole rotor
Where does this equation come from? It's different from other approximations above.
Original code:
tau_s = np.pi * dia / S # slot pitch
b_s = tau_s * b_s_tau_s # slot width
b_t = tau_s - b_s # tooth width
K_C1 = (tau_s + 10 * g_a) / (tau_s - b_s + 10 * g_a) # salient pole rotor
slotPitch - slotWidth == toothWidth
"""
return (slotPitch + 10 * airGap) / (slotPitch - slotWidth + 10 * airGap) # salient pole rotor
# ---------------------------------
def array_seq(q1, b, c, Total_number):
Seq = np.array([1, 0, 0, 1, 0])
diff = Total_number * 5 / 6
G = np.prod(Seq.shape)
return Seq, diff, G
# ---------------------------------
def winding_factor(Sin, b, c, p, m):
S = int(Sin)
# Step 1 Writing q1 as a fraction
q1 = b / c
# Step 2: Writing a binary sequence of b-c zeros and b ones
Total_number = int(S / b)
L = array_seq(q1, b, c, Total_number)
# STep 3 : Repeat binary sequence Q_s/b times
New_seq = np.tile(L[0], Total_number)
Actual_seq1 = pd.DataFrame(New_seq[:, None].T)
Winding_sequence = ["A", "C1", "B", "A1", "C", "B1"]
New_seq2 = np.tile(Winding_sequence, int(L[1]))
Actual_seq2 = pd.DataFrame(New_seq2[:, None].T)
Seq_f = pd.concat([Actual_seq1, Actual_seq2], ignore_index=True)
    Seq_f = Seq_f.reset_index(drop=True)
Slots = S
R = S if S % 2 == 0 else S + 1
Windings_arrange = (pd.DataFrame(index=Seq_f.index, columns=Seq_f.columns[1:R])).fillna(0)
counter = 1
# Step #4 Arranging winding in Slots
for i in range(0, len(New_seq)):
if Seq_f.loc[0, i] == 1:
Windings_arrange.loc[0, counter] = Seq_f.loc[1, i]
counter = counter + 1
Windings_arrange.loc[1, 1] = "C1"
for k in range(1, R):
if Windings_arrange.loc[0, k] == "A":
Windings_arrange.loc[1, k + 1] = "A1"
elif Windings_arrange.loc[0, k] == "B":
Windings_arrange.loc[1, k + 1] = "B1"
elif Windings_arrange.loc[0, k] == "C":
Windings_arrange.loc[1, k + 1] = "C1"
elif Windings_arrange.loc[0, k] == "A1":
Windings_arrange.loc[1, k + 1] = "A"
elif Windings_arrange.loc[0, k] == "B1":
Windings_arrange.loc[1, k + 1] = "B"
elif Windings_arrange.loc[0, k] == "C1":
Windings_arrange.loc[1, k + 1] = "C"
Phase_A = np.zeros((1000, 1), dtype=float)
counter_A = 0
# Windings_arrange.to_excel('test.xlsx')
# Winding vector, W_A for Phase A
for l in range(1, R):
if Windings_arrange.loc[0, l] == "A" and Windings_arrange.loc[1, l] == "A":
Phase_A[counter_A, 0] = l
Phase_A[counter_A + 1, 0] = l
counter_A = counter_A + 2
elif Windings_arrange.loc[0, l] == "A1" and Windings_arrange.loc[1, l] == "A1":
Phase_A[counter_A, 0] = -1 * l
Phase_A[counter_A + 1, 0] = -1 * l
counter_A = counter_A + 2
elif Windings_arrange.loc[0, l] == "A" or Windings_arrange.loc[1, l] == "A":
Phase_A[counter_A, 0] = l
counter_A = counter_A + 1
elif Windings_arrange.loc[0, l] == "A1" or Windings_arrange.loc[1, l] == "A1":
Phase_A[counter_A, 0] = -1 * l
counter_A = counter_A + 1
W_A = (np.trim_zeros(Phase_A)).T
# Calculate winding factor
K_w = 0
for r in range(0, int(2 * (S) / 3)):
Gamma = 2 * np.pi * p * np.abs(W_A[0, r]) / S
K_w += np.sign(W_A[0, r]) * (np.exp(Gamma * 1j))
K_w = np.abs(K_w) / (2 * S / 3)
CPMR = np.lcm(S, int(2 * p))
N_cog_s = CPMR / S
N_cog_p = CPMR / p
N_cog_t = CPMR * 0.5 / p
A = np.lcm(S, int(2 * p))
b_p_tau_p = 2 * 1 * p / S - 0
b_t_tau_s = (2) * S * 0.5 / p - 2
return K_w
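# ---------------------------------
# Illustrative sketch (not part of the original module): the slot/pole
# combination below (S = 240 slots, b/c = 2/5, p = 100 pole pairs, m = 3
# phases) is an assumed example chosen to be consistent with the hard-coded
# 5-slot sequence in array_seq(); it is not a validated design point.
def _demo_winding_factor():
    S, b, c, p, m = 240, 2, 5, 100, 3
    # Fundamental winding factor, expected to come out a little below 1.
    return winding_factor(S, b, c, p, m)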
# ---------------------------------
def shell_constant(R, t, l, x, E, v):
Lambda = (3 * (1 - v ** 2) / (R ** 2 * t ** 2)) ** 0.25
D = E * t ** 3 / (12 * (1 - v ** 2))
C_14 = (np.sinh(Lambda * l)) ** 2 + (np.sin(Lambda * l)) ** 2
C_11 = (np.sinh(Lambda * l)) ** 2 - (np.sin(Lambda * l)) ** 2
F_2 = np.cosh(Lambda * x) * np.sin(Lambda * x) + np.sinh(Lambda * x) * np.cos(Lambda * x)
C_13 = np.cosh(Lambda * l) * np.sinh(Lambda * l) - np.cos(Lambda * l) * np.sin(Lambda * l)
F_1 = np.cosh(Lambda * x) * np.cos(Lambda * x)
F_4 = np.cosh(Lambda * x) * np.sin(Lambda * x) - np.sinh(Lambda * x) * np.cos(Lambda * x)
return D, Lambda, C_14, C_11, F_2, C_13, F_1, F_4
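# ---------------------------------
# Illustrative sketch (not part of the original module): shell_constant()
# returns an unnamed tuple, so unpacking it with the names used in the
# deflection formulas below makes them easier to follow. Inputs are assumed
# example numbers only.
def _demo_shell_constant():
    R, t, l = 3.0, 0.05, 1.5   # m: shell radius, wall thickness, length
    E, v = 2.0e11, 0.3         # Pa, Poisson ratio (structural steel)
    D, Lambda, C_14, C_11, F_2, C_13, F_1, F_4 = shell_constant(R, t, l, l, E, v)
    return D, Lambda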
# ---------------------------------
def plate_constant(a, b, E, v, r_o, t):
D = E * t ** 3 / (12 * (1 - v ** 2))
C_2 = 0.25 * (1 - (b / a) ** 2 * (1 + 2 * np.log(a / b)))
C_3 = 0.25 * (b / a) * (((b / a) ** 2 + 1) * np.log(a / b) + (b / a) ** 2 - 1)
C_5 = 0.5 * (1 - (b / a) ** 2)
C_6 = 0.25 * (b / a) * ((b / a) ** 2 - 1 + 2 * np.log(a / b))
C_8 = 0.5 * (1 + v + (1 - v) * (b / a) ** 2)
C_9 = (b / a) * (0.5 * (1 + v) * np.log(a / b) + 0.25 * (1 - v) * (1 - (b / a) ** 2))
L_11 = (1 / 64) * (
1 + 4 * (r_o / a) ** 2 - 5 * (r_o / a) ** 4 - 4 * (r_o / a) ** 2 * (2 + (r_o / a) ** 2) * np.log(a / r_o)
)
L_17 = 0.25 * (1 - 0.25 * (1 - v) * ((1 - (r_o / a) ** 4) - (r_o / a) ** 2 * (1 + (1 + v) * np.log(a / r_o))))
return D, C_2, C_3, C_5, C_6, C_8, C_9, L_11, L_17
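# ---------------------------------
# Illustrative sketch (not part of the original module): plate_constant()
# likewise returns an unnamed tuple of annular-plate coefficients (Roark-style
# C_i and L_i constants). Inputs are assumed example numbers only.
def _demo_plate_constant():
    a, b = 3.2, 0.3    # m: outer and inner radius of the annular plate
    r_o = 1.6          # m: assumed radius of the applied line load
    t = 0.06           # m: plate thickness
    E, v = 2.0e11, 0.3
    D, C_2, C_3, C_5, C_6, C_8, C_9, L_11, L_17 = plate_constant(a, b, E, v, r_o, t)
    return D, L_11, L_17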
# ---------------------------------
debug = False
# ---------------------------------
class GeneratorBase(om.ExplicitComponent):
"""
Base class for generators
Parameters
----------
B_r : float, [T]
Remnant flux density
E : float, [Pa]
youngs modulus
G : float, [Pa]
Shear modulus
P_Fe0e : float, [W/kg]
specific eddy losses @ 1.5T, 50Hz
P_Fe0h : float, [W/kg]
specific hysteresis losses W / kg @ 1.5 T @50 Hz
S_N : float
Slip
alpha_p : float
b_r_tau_r : float
Rotor Slot width / Slot pitch ratio
b_ro : float, [m]
Rotor slot opening width
b_s_tau_s : float
Stator Slot width/Slot pitch ratio
b_so : float, [m]
Stator slot opening width
cofi : float
power factor
freq : float, [Hz]
grid frequency
h_i : float, [m]
coil insulation thickness
h_sy0 : float
h_w : float, [m]
Slot wedge height
k_fes : float
Stator iron fill factor per Grauers
k_fillr : float
Rotor slot fill factor
k_fills : float
Stator Slot fill factor
k_s : float
magnetic saturation factor for iron
m : int
Number of phases
mu_0 : float, [m*kg/s**2/A**2]
permeability of free space
mu_r : float, [m*kg/s**2/A**2]
relative permeability (neodymium)
p : float
number of pole pairs (taken as int within code)
    phi : float, [rad]
tilt angle (during transportation)
q1 : int
Stator slots per pole per phase
q2 : int
Rotor slots per pole per phase
ratio_mw2pp : float
ratio of magnet width to pole pitch(bm / self.tau_p)
resist_Cu : float, [ohm/m]
Copper resistivity
sigma : float, [Pa]
assumed max shear stress
v : float
poisson ratio
y_tau_p : float
Stator coil span to pole pitch
y_tau_pr : float
Rotor coil span to pole pitch
I_0 : float, [A]
no-load excitation current
T_rated : float, [N*m]
Rated torque
d_r : float, [m]
arm depth d_r
h_m : float, [m]
magnet height
h_0 : float, [m]
Slot height
h_s : float, [m]
Yoke height h_s
len_s : float, [m]
Stator core length
machine_rating : float, [W]
Machine rating
shaft_rpm : numpy array[n_pc], [rpm]
rated speed of input shaft (lss for direct, hss for geared)
n_r : float
number of arms n
rad_ag : float, [m]
airgap radius
t_wr : float, [m]
arm depth thickness
n_s : float
number of stator arms n_s
b_st : float, [m]
arm width b_st
d_s : float, [m]
arm depth d_s
t_ws : float, [m]
arm depth thickness
D_shaft : float, [m]
Shaft diameter
rho_Copper : float, [kg*m**-3]
Copper density
rho_Fe : float, [kg*m**-3]
Magnetic Steel density
rho_Fes : float, [kg*m**-3]
Structural Steel density
rho_PM : float, [kg*m**-3]
Magnet density
Returns
-------
B_rymax : float, [T]
Peak Rotor yoke flux density
B_trmax : float, [T]
maximum tooth flux density in rotor
B_tsmax : float, [T]
maximum tooth flux density in stator
B_g : float, [T]
Peak air gap flux density B_g
B_g1 : float, [T]
air gap flux density fundamental
B_pm1 : float
Fundamental component of peak air gap flux density
N_s : float
Number of turns in the stator winding
b_s : float, [m]
slot width
b_t : float, [m]
tooth width
A_Curcalc : float, [mm**2]
Conductor cross-section mm^2
A_Cuscalc : float, [mm**2]
Stator Conductor cross-section mm^2
b_m : float
magnet width
mass_PM : float, [kg]
Magnet mass
Copper : float, [kg]
Copper Mass
Iron : float, [kg]
Electrical Steel Mass
Structural_mass : float, [kg]
Structural Mass
generator_mass : float, [kg]
Actual mass
f : float
Generator output frequency
I_s : float, [A]
Generator output phase current
R_s : float, [ohm]
Stator resistance
L_s : float
Stator synchronising inductance
J_s : float, [A*m**-2]
Stator winding current density
A_1 : float
Specific current loading
K_rad : float
Stack length ratio
Losses : numpy array[n_pc], [W]
Total loss
generator_efficiency : numpy array[n_pc]
Generator electromagnetic efficiency values (<1)
u_ar : float, [m]
Rotor radial deflection
u_as : float, [m]
Stator radial deflection
u_allow_r : float, [m]
Allowable radial rotor
u_allow_s : float, [m]
Allowable radial stator
y_ar : float, [m]
Rotor axial deflection
y_as : float, [m]
Stator axial deflection
y_allow_r : float, [m]
Allowable axial
y_allow_s : float, [m]
Allowable axial
z_ar : float, [m]
Rotor circumferential deflection
z_as : float, [m]
Stator circumferential deflection
z_allow_r : float, [m]
Allowable circum rotor
z_allow_s : float, [m]
Allowable circum stator
b_allow_r : float, [m]
Allowable arm dimensions
b_allow_s : float, [m]
Allowable arm
TC1 : float, [m**3]
Torque constraint
TC2r : float, [m**3]
Torque constraint-rotor
TC2s : float, [m**3]
Torque constraint-stator
R_out : float, [m]
Outer radius
S : float
Stator slots
Slot_aspect_ratio : float
Slot aspect ratio
Slot_aspect_ratio1 : float
Stator slot aspect ratio
Slot_aspect_ratio2 : float
Rotor slot aspect ratio
D_ratio : float
Stator diameter ratio
J_r : float
Rotor winding Current density
L_sm : float
mutual inductance
Q_r : float
Rotor slots
R_R : float
Rotor resistance
b_r : float
rotor slot width
b_tr : float
rotor tooth width
b_trmin : float
minimum tooth width
"""
def initialize(self):
self.options.declare("n_pc", default=20)
def setup(self):
n_pc = self.options["n_pc"]
# Constants and parameters
self.add_input("B_r", val=1.2, units="T")
self.add_input("E", val=0.0, units="Pa")
self.add_input("G", val=0.0, units="Pa")
self.add_input("P_Fe0e", val=1.0, units="W/kg")
self.add_input("P_Fe0h", val=4.0, units="W/kg")
self.add_input("S_N", val=-0.002)
self.add_input("alpha_p", val=0.5 * np.pi * 0.7)
self.add_input("b_r_tau_r", val=0.45)
self.add_input("b_ro", val=0.004, units="m")
self.add_input("b_s_tau_s", val=0.45)
self.add_input("b_so", val=0.004, units="m")
self.add_input("cofi", val=0.85)
self.add_input("freq", val=60, units="Hz")
self.add_input("h_i", val=0.001, units="m")
self.add_input("h_sy0", val=0.0)
self.add_input("h_w", val=0.005, units="m")
self.add_input("k_fes", val=0.9)
self.add_input("k_fillr", val=0.7)
self.add_input("k_fills", val=0.65)
self.add_input("k_s", val=0.2)
self.add_discrete_input("m", val=3)
self.add_input("mu_0", val=np.pi * 4e-7, units="m*kg/s**2/A**2")
self.add_input("mu_r", val=1.06, units="m*kg/s**2/A**2")
self.add_input("p", val=3.0)
self.add_input("phi", val=np.deg2rad(90), units="rad")
self.add_discrete_input("q1", val=6)
self.add_discrete_input("q2", val=4)
self.add_input("ratio_mw2pp", val=0.7)
self.add_input("resist_Cu", val=1.8e-8 * 1.4, units="ohm/m")
self.add_input("sigma", val=40e3, units="Pa")
self.add_input("v", val=0.3)
self.add_input("y_tau_p", val=1.0)
self.add_input("y_tau_pr", val=10.0 / 12)
# General inputs
# self.add_input('r_s', val=0.0, units='m', desc='airgap radius r_s')
self.add_input("I_0", val=0.0, units="A")
self.add_input("rated_torque", val=0.0, units="N*m")
self.add_input("d_r", val=0.0, units="m")
self.add_input("h_m", val=0.0, units="m")
self.add_input("h_0", val=0.0, units="m")
self.add_input("h_s", val=0.0, units="m")
self.add_input("len_s", val=0.0, units="m")
self.add_input("machine_rating", val=0.0, units="W")
self.add_input("shaft_rpm", val=np.zeros(n_pc), units="rpm")
self.add_input("n_r", val=0.0)
self.add_input("rad_ag", val=0.0, units="m")
self.add_input("t_wr", val=0.0, units="m")
# Structural design variables
self.add_input("n_s", val=0.0)
self.add_input("b_st", val=0.0, units="m")
self.add_input("d_s", val=0.0, units="m")
self.add_input("t_ws", val=0.0, units="m")
self.add_input("D_shaft", val=0.0, units="m")
# Material properties
self.add_input("rho_Copper", val=8900.0, units="kg*m**-3")
self.add_input("rho_Fe", val=7700.0, units="kg*m**-3")
self.add_input("rho_Fes", val=7850.0, units="kg*m**-3")
self.add_input("rho_PM", val=7450.0, units="kg*m**-3")
# Magnetic loading
self.add_output("B_rymax", val=0.0, units="T")
self.add_output("B_trmax", val=0.0, units="T")
self.add_output("B_tsmax", val=0.0, units="T")
self.add_output("B_g", val=0.0, units="T")
self.add_output("B_g1", val=0.0, units="T")
self.add_output("B_pm1", val=0.0)
# Stator design
self.add_output("N_s", val=0.0)
self.add_output("b_s", val=0.0, units="m")
self.add_output("b_t", val=0.0, units="m")
self.add_output("A_Curcalc", val=0.0, units="mm**2")
self.add_output("A_Cuscalc", val=0.0, units="mm**2")
# Rotor magnet dimension
self.add_output("b_m", val=0.0)
# Mass Outputs
self.add_output("mass_PM", val=0.0, units="kg")
self.add_output("Copper", val=0.0, units="kg")
self.add_output("Iron", val=0.0, units="kg")
self.add_output("Structural_mass", val=0.0, units="kg")
self.add_output("generator_mass", val=0.0, units="kg")
# Electrical performance
self.add_output("f", val=np.zeros(n_pc))
self.add_output("I_s", val=np.zeros(n_pc), units="A")
self.add_output("R_s", val=np.zeros(n_pc), units="ohm")
self.add_output("L_s", val=0.0)
self.add_output("J_s", val=np.zeros(n_pc), units="A*m**-2")
self.add_output("A_1", val=np.zeros(n_pc))
# Objective functions
self.add_output("K_rad", val=0.0)
self.add_output("Losses", val=np.zeros(n_pc), units="W")
self.add_output("eandm_efficiency", val=np.zeros(n_pc))
# Structural performance
self.add_output("u_ar", val=0.0, units="m")
self.add_output("u_as", val=0.0, units="m")
self.add_output("u_allow_r", val=0.0, units="m")
self.add_output("u_allow_s", val=0.0, units="m")
self.add_output("y_ar", val=0.0, units="m")
self.add_output("y_as", val=0.0, units="m")
self.add_output("y_allow_r", val=0.0, units="m")
self.add_output("y_allow_s", val=0.0, units="m")
self.add_output("z_ar", val=0.0, units="m")
self.add_output("z_as", val=0.0, units="m")
self.add_output("z_allow_r", val=0.0, units="m")
self.add_output("z_allow_s", val=0.0, units="m")
self.add_output("b_allow_r", val=0.0, units="m")
self.add_output("b_allow_s", val=0.0, units="m")
self.add_output("TC1", val=0.0, units="m**3")
self.add_output("TC2r", val=0.0, units="m**3")
self.add_output("TC2s", val=0.0, units="m**3")
# Other parameters
self.add_output("R_out", val=0.0, units="m")
self.add_output("S", val=0.0)
self.add_output("Slot_aspect_ratio", val=0.0)
self.add_output("Slot_aspect_ratio1", val=0.0)
self.add_output("Slot_aspect_ratio2", val=0.0)
self.add_output("D_ratio", val=0.0)
self.add_output("J_r", val=np.zeros(n_pc))
self.add_output("L_sm", val=0.0)
self.add_output("Q_r", val=0.0)
self.add_output("R_R", val=0.0)
self.add_output("b_r", val=0.0)
self.add_output("b_tr", val=0.0)
self.add_output("b_trmin", val=0.0)
# ----------------------------------------------------------------------------------------
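# Illustrative sketch (not part of the original module): GeneratorBase is an
# OpenMDAO ExplicitComponent, so a concrete subclass such as PMSG_Outer
# (defined below) is normally evaluated inside an om.Problem. The call below
# is schematic; the input values are placeholders only, and a real run must
# set every design variable to a consistent, non-default value.
def _demo_run_generator():
    prob = om.Problem()
    prob.model.add_subsystem("generator", PMSG_Outer(n_pc=20), promotes=["*"])
    prob.setup()
    prob["rad_ag"] = 5.0              # m, assumed air-gap radius
    prob["len_s"] = 2.0               # m, assumed stator stack length
    prob["machine_rating"] = 10.0e6   # W, assumed rating
    prob.run_model()
    return prob["generator_mass"]
# ----------------------------------------------------------------------------------------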
class PMSG_Outer(GeneratorBase):
"""
Estimates overall electromagnetic dimensions and Efficiency of PMSG -arms generator.
Parameters
----------
P_mech : float, [W]
Shaft mechanical power
N_c : float
Number of turns per coil
b : float
Slot pole combination
c : float
Slot pole combination
E_p : float, [V]
Stator phase voltage
h_yr : float, [m]
rotor yoke height
h_ys : float, [m]
Yoke height
h_sr : float, [m]
Structural Mass
h_ss : float, [m]
Stator yoke height
t_r : float, [m]
Rotor disc thickness
t_s : float, [m]
Stator disc thickness
y_sh : float, [m]
Shaft deflection
theta_sh : float, [rad]
slope of shaft
D_nose : float, [m]
Nose outer diameter
y_bd : float, [m]
Deflection of the bedplate
theta_bd : float, [rad]
Slope at the bedplate
u_allow_pcent : float
Radial deflection as a percentage of air gap diameter
y_allow_pcent : float
Radial deflection as a percentage of air gap diameter
z_allow_deg : float, [deg]
Allowable torsional twist
B_tmax : float, [T]
Peak Teeth flux density
Returns
-------
B_smax : float, [T]
Peak Stator flux density
B_symax : float, [T]
Peak Stator flux density
tau_p : float, [m]
Pole pitch
q : float, [N/m**2]
Normal stress
len_ag : float, [m]
Air gap length
h_t : float, [m]
tooth height
tau_s : float, [m]
Slot pitch
J_actual : float, [A/m**2]
Current density
T_e : float, [N*m]
Electromagnetic torque
twist_r : float, [deg]
torsional twist
twist_s : float, [deg]
Stator torsional twist
Structural_mass_rotor : float, [kg]
Rotor mass (kg)
Structural_mass_stator : float, [kg]
Stator mass (kg)
Mass_tooth_stator : float, [kg]
Teeth and copper mass
Mass_yoke_rotor : float, [kg]
Rotor yoke mass
Mass_yoke_stator : float, [kg]
Stator yoke mass
rotor_mass : float, [kg]
Total rotor mass
stator_mass : float, [kg]
Total stator mass
"""
def initialize(self):
super(PMSG_Outer, self).initialize()
def setup(self):
super(PMSG_Outer, self).setup()
n_pc = self.options["n_pc"]
# PMSG_structrual inputs
self.add_input("P_mech", units="W")
self.add_input("N_c", 0.0)
self.add_input("b", 0.0)
self.add_input("c", 0.0)
self.add_input("E_p", 0.0, units="V")
self.add_input("h_yr", val=0.0, units="m")
self.add_input("h_ys", val=0.0, units="m")
self.add_input("h_sr", 0.0, units="m")
self.add_input("h_ss", 0.0, units="m")
self.add_input("t_r", 0.0, units="m")
self.add_input("t_s", 0.0, units="m")
self.add_input("y_sh", units="m")
self.add_input("theta_sh", 0.0, units="rad")
self.add_input("D_nose", 0.0, units="m")
self.add_input("y_bd", units="m")
self.add_input("theta_bd", 0.0, units="rad")
self.add_input("u_allow_pcent", 0.0)
self.add_input("y_allow_pcent", 0.0)
self.add_input("z_allow_deg", 0.0, units="deg")
# Magnetic loading
self.add_input("B_tmax", 0.0, units="T")
self.add_output("B_smax", val=0.0, units="T")
self.add_output("B_symax", val=0.0, units="T")
self.add_output("tau_p", 0.0, units="m")
self.add_output("q", 0.0, units="N/m**2")
self.add_output("len_ag", 0.0, units="m")
# Stator design
self.add_output("h_t", 0.0, units="m")
self.add_output("tau_s", 0.0, units="m")
# Electrical performance
self.add_output("J_actual", val=np.zeros(n_pc), units="A/m**2")
self.add_output("T_e", 0.0, units="N*m")
# Material properties
self.add_output("twist_r", 0.0, units="deg")
self.add_output("twist_s", 0.0, units="deg")
# Mass Outputs
self.add_output("Structural_mass_rotor", 0.0, units="kg")
self.add_output("Structural_mass_stator", 0.0, units="kg")
self.add_output("Mass_tooth_stator", 0.0, units="kg")
self.add_output("Mass_yoke_rotor", 0.0, units="kg")
self.add_output("Mass_yoke_stator", 0.0, units="kg")
self.add_output("rotor_mass", 0.0, units="kg")
self.add_output("stator_mass", 0.0, units="kg")
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
# Unpack inputs
rad_ag = float(inputs["rad_ag"])
len_s = float(inputs["len_s"])
p = float(inputs["p"])
b = float(inputs["b"])
c = float(inputs["c"])
h_m = float(inputs["h_m"])
h_ys = float(inputs["h_ys"])
h_yr = float(inputs["h_yr"])
h_s = float(inputs["h_s"])
h_ss = float(inputs["h_ss"])
h_0 = float(inputs["h_0"])
B_tmax = float(inputs["B_tmax"])
E_p = float(inputs["E_p"])
P_mech = float(inputs["P_mech"])
P_av_v = float(inputs["machine_rating"])
h_sr = float(inputs["h_sr"])
t_r = float(inputs["t_r"])
t_s = float(inputs["t_s"])
R_sh = 0.5 * float(inputs["D_shaft"])
R_no = 0.5 * float(inputs["D_nose"])
y_sh = float(inputs["y_sh"])
y_bd = float(inputs["y_bd"])
rho_Fes = float(inputs["rho_Fes"])
rho_Fe = float(inputs["rho_Fe"])
sigma = float(inputs["sigma"])
shaft_rpm = inputs["shaft_rpm"]
# Grab constant values
B_r = float(inputs["B_r"])
E = float(inputs["E"])
G = float(inputs["G"])
P_Fe0e = float(inputs["P_Fe0e"])
P_Fe0h = float(inputs["P_Fe0h"])
cofi = float(inputs["cofi"])
h_w = float(inputs["h_w"])
k_fes = float(inputs["k_fes"])
k_fills = float(inputs["k_fills"])
m = int(discrete_inputs["m"])
mu_0 = float(inputs["mu_0"])
mu_r = float(inputs["mu_r"])
p = float(inputs["p"])
phi = float(inputs["phi"])
ratio_mw2pp = float(inputs["ratio_mw2pp"])
resist_Cu = float(inputs["resist_Cu"])
v = float(inputs["v"])
"""
#Assign values to universal constants
B_r = 1.279 # Tesla remnant flux density
E = 2e11 # N/m^2 young's modulus
ratio = 0.8 # ratio of magnet width to pole pitch(bm/self.tau_p)
mu_0 = np.pi*4e-7 # permeability of free space
mu_r = 1.06 # relative permeability
cofi = 0.85 # power factor
#Assign values to design constants
h_0 = 0.005 # Slot opening height
h_w = 0.004 # Slot wedge height
m = 3 # no of phases
#b_s_tau_s = 0.45 # slot width to slot pitch ratio
k_fills = 0.65 # Slot fill factor
P_Fe0h = 4 # specific hysteresis losses W/kg @ 1.5 T
        P_Fe0e = 1 # specific eddy losses W/kg @ 1.5 T
k_fes = 0.8 # Iron fill factor
#Assign values to universal constants
phi = 90*2*np.pi/360 # tilt angle (rotor tilt -90 degrees during transportation)
v = 0.3 # Poisson's ratio
G = 79.3e9
"""
######################## Electromagnetic design ###################################
K_rad = len_s / (2 * rad_ag) # Aspect ratio
# Calculating air gap length
dia = 2 * rad_ag # air gap diameter
len_ag = 0.001 * dia # air gap length
r_s = rad_ag - len_ag # Stator outer radius
b_so = 2 * len_ag # Slot opening
tau_p = np.pi * dia / (2 * p) # pole pitch
# Calculating winding factor
Slot_pole = b / c
S = Slot_pole * 2 * p * m
testval = S / (m * np.gcd(int(S), int(p)))
if float(np.round(testval, 3)).is_integer():
k_w = winding_factor(int(S), b, c, int(p), m)
b_m = ratio_mw2pp * tau_p # magnet width
alpha_p = np.pi / 2 * ratio_mw2pp
tau_s = np.pi * (dia - 2 * len_ag) / S
            # Calculating Carter factor for stator and effective air gap length
gamma = (
4
/ np.pi
* (
b_so / 2 / (len_ag + h_m / mu_r) * np.arctan(b_so / 2 / (len_ag + h_m / mu_r))
- np.log(np.sqrt(1 + (b_so / 2 / (len_ag + h_m / mu_r)) ** 2))
)
)
k_C = tau_s / (tau_s - gamma * (len_ag + h_m / mu_r)) # carter coefficient
g_eff = k_C * (len_ag + h_m / mu_r)
# angular frequency in radians
om_m = 2 * np.pi * shaft_rpm / 60
om_e = p * om_m
            freq = om_e / 2 / np.pi # output frequency
# Calculating magnetic loading
B_pm1 = B_r * h_m / mu_r / (g_eff)
B_g = B_r * h_m / (mu_r * g_eff) * (4 / np.pi) * np.sin(alpha_p)
B_symax = B_pm1 * b_m / (2 * h_ys) * k_fes
B_rymax = B_pm1 * b_m * k_fes / (2 * h_yr)
b_t = B_pm1 * tau_s / B_tmax
N_c = 2 # Number of turns per coil
q = (B_g) ** 2 / 2 / mu_0
# Stator winding length ,cross-section and resistance
l_Cus = 2 * (len_s + np.pi / 4 * (tau_s + b_t)) # length of a turn
# Calculating no-load voltage induced in the stator
N_s = np.rint(E_p / (np.sqrt(2) * len_s * r_s * k_w * om_m * B_g))
# Z = P_av_v / (m*E_p)
# Calculating leakage inductance in stator
V_1 = E_p / 1.1
I_n = P_av_v / 3 / cofi / V_1
J_s = 6.0
A_Cuscalc = I_n / J_s
A_slot = 2 * N_c * A_Cuscalc * (10 ** -6) / k_fills
tau_s_new = np.pi * (dia - 2 * len_ag - 2 * h_w - 2 * h_0) / S
b_s2 = tau_s_new - b_t # Slot top width
b_s1 = np.sqrt(b_s2 ** 2 - 4 * np.pi * A_slot / S)
b_s = (b_s1 + b_s2) * 0.5
N_coil = 2 * S
P_s = mu_0 * (h_s / 3 / b_s + h_w * 2 / (b_s2 + b_so) + h_0 / b_so) # Slot permeance function
L_ssigmas = S / 3 * 4 * N_c ** 2 * len_s * P_s # slot leakage inductance
L_ssigmaew = (
N_coil * N_c ** 2 * mu_0 * tau_s * np.log((0.25 * np.pi * tau_s ** 2) / (0.5 * h_s * b_s))
) # end winding leakage inductance
L_aa = 2 * np.pi / 3 * (N_c ** 2 * mu_0 * len_s * r_s / g_eff)
L_m = L_aa
L_ssigma = L_ssigmas + L_ssigmaew
L_s = L_m + L_ssigma
G_leak = np.abs((1.1 * E_p) ** 4 - (1 / 9) * (P_av_v * om_e * L_s) ** 2)
# Calculating stator current and electrical loading
I_s = np.sqrt(2 * (np.abs((E_p * 1.1) ** 2 - G_leak ** 0.5)) / (om_e * L_s) ** 2)
A_1 = 6 * I_s * N_s / np.pi / dia
J_actual = I_s / (A_Cuscalc * 2 ** 0.5)
L_Cus = N_s * l_Cus
R_s = inputs["resist_Cu"] * (N_s) * l_Cus / (A_Cuscalc * (10 ** -6))
B_smax = np.sqrt(2) * I_s * mu_0 / g_eff
# Calculating Electromagnetically active mass
wedge_area = (b_s * 0.5 - b_so * 0.5) * (2 * h_0 + h_w)
V_Cus = m * L_Cus * (A_Cuscalc * (10 ** -6)) # copper volume
h_t = h_s + h_w + h_0
V_Fest = len_s * S * (b_t * (h_s + h_w + h_0) + wedge_area) # volume of iron in stator tooth
V_Fesy = (
len_s
* np.pi
* ((rad_ag - len_ag - h_s - h_w - h_0) ** 2 - (rad_ag - len_ag - h_s - h_w - h_0 - h_ys) ** 2)
) # volume of iron in stator yoke
V_Fery = len_s * np.pi * ((rad_ag + h_m + h_yr) ** 2 - (rad_ag + h_m) ** 2)
Copper = V_Cus[-1] * inputs["rho_Copper"]
M_Fest = V_Fest * rho_Fe # Mass of stator tooth
M_Fesy = V_Fesy * rho_Fe # Mass of stator yoke
M_Fery = V_Fery * rho_Fe # Mass of rotor yoke
Iron = M_Fest + M_Fesy + M_Fery
mass_PM = 2 * np.pi * (rad_ag + h_m) * len_s * h_m * ratio_mw2pp * inputs["rho_PM"]
# Calculating Losses
##1. Copper Losses
K_R = 1.0 # Skin effect correction co-efficient
P_Cu = m * (I_s / 2 ** 0.5) ** 2 * R_s * K_R
# Iron Losses ( from Hysteresis and eddy currents)
P_Hyys = (
M_Fesy * (B_symax / 1.5) ** 2 * (P_Fe0h * om_e / (2 * np.pi * 60))
) # Hysteresis losses in stator yoke
P_Ftys = (
M_Fesy * ((B_symax / 1.5) ** 2) * (P_Fe0e * (om_e / (2 * np.pi * 60)) ** 2)
) # Eddy losses in stator yoke
P_Fesynom = P_Hyys + P_Ftys
P_Hyd = (
M_Fest * (B_tmax / 1.5) ** 2 * (P_Fe0h * om_e / (2 * np.pi * 60))
) # Hysteresis losses in stator teeth
P_Ftd = (
M_Fest * (B_tmax / 1.5) ** 2 * (P_Fe0e * (om_e / (2 * np.pi * 60)) ** 2)
) # Eddy losses in stator teeth
P_Festnom = P_Hyd + P_Ftd
# Iron Losses ( from Hysteresis and eddy currents)
P_Hyyr = (
M_Fery * (B_rymax / 1.5) ** 2 * (P_Fe0h * om_e / (2 * np.pi * 60))
            ) # Hysteresis losses in rotor yoke
P_Ftyr = (
M_Fery * ((B_rymax / 1.5) ** 2) * (P_Fe0e * (om_e / (2 * np.pi * 60)) ** 2)
            ) # Eddy losses in rotor yoke
P_Ferynom = P_Hyyr + P_Ftyr
# additional stray losses due to leakage flux
P_ad = 0.2 * (P_Hyys + P_Ftys + P_Hyd + P_Ftd + P_Hyyr + P_Ftyr)
pFtm = 300 # specific magnet loss
P_Ftm = pFtm * 2 * p * b_m * len_s
Losses = P_Cu + P_Festnom + P_Fesynom + P_ad + P_Ftm + P_Ferynom
gen_eff = (P_mech - Losses) / (P_mech)
I_snom = gen_eff * (P_mech / m / E_p / cofi) # rated current
I_qnom = gen_eff * P_mech / (m * E_p)
X_snom = om_e * (L_m + L_ssigma)
T_e = np.pi * rad_ag ** 2 * len_s * 2 * sigma
Stator = M_Fesy + M_Fest + Copper # modified mass_stru_steel
Rotor = M_Fery + mass_PM # modified (N_r*(R_1-self.R_sh)*a_r*self.rho_Fes))
Mass_tooth_stator = M_Fest + Copper
Mass_yoke_rotor = M_Fery
Mass_yoke_stator = M_Fesy
R_out = (dia + 2 * h_m + 2 * h_yr + 2 * inputs["h_sr"]) * 0.5
Losses = Losses
generator_efficiency = gen_eff
else:
# Bad design
for k in outputs.keys():
outputs[k] = 1e30
return
######################## Rotor inactive (structural) design ###################################
# Radial deformation of rotor
R = rad_ag + h_m
L_r = len_s + t_r + 0.125
constants_x_0 = shell_constant(R, t_r, L_r, 0, E, v)
constants_x_L = shell_constant(R, t_r, L_r, L_r, E, v)
f_d_denom1 = R / (E * ((R) ** 2 - (R_sh) ** 2)) * ((1 - v) * R ** 2 + (1 + v) * (R_sh) ** 2)
f_d_denom2 = (
t_r
/ (2 * constants_x_0[0] * (constants_x_0[1]) ** 3)
* (
constants_x_0[2] / (2 * constants_x_0[3]) * constants_x_0[4]
- constants_x_0[5] / constants_x_0[3] * constants_x_0[6]
- 0.5 * constants_x_0[7]
)
)
f = q * (R) ** 2 * t_r / (E * (h_yr + h_sr) * (f_d_denom1 + f_d_denom2))
u_d = (
f
/ (constants_x_L[0] * (constants_x_L[1]) ** 3)
* (
(
constants_x_L[2] / (2 * constants_x_L[3]) * constants_x_L[4]
- constants_x_L[5] / constants_x_L[3] * constants_x_L[6]
- 0.5 * constants_x_L[7]
)
)
+ y_sh
)
u_ar = (q * (R) ** 2) / (E * (h_yr + h_sr)) - u_d
u_ar = np.abs(u_ar + y_sh)
u_allow_r = 2 * rad_ag / 1000 * inputs["u_allow_pcent"] / 100
# axial deformation of rotor
W_back_iron = plate_constant(R + h_sr + h_yr, R_sh, E, v, 0.5 * h_yr + R, t_r)
W_ssteel = plate_constant(R + h_sr + h_yr, R_sh, E, v, h_yr + R + h_sr * 0.5, t_r)
W_mag = plate_constant(R + h_sr + h_yr, R_sh, E, v, h_yr + R - 0.5 * h_m, t_r)
W_ir = rho_Fe * gravity * np.sin(phi) * (L_r - t_r) * h_yr
y_ai1r = (
-W_ir
* (0.5 * h_yr + R) ** 4
/ (R_sh * W_back_iron[0])
* (W_back_iron[1] * W_back_iron[4] / W_back_iron[3] - W_back_iron[2])
)
W_sr = rho_Fes * gravity * np.sin(phi) * (L_r - t_r) * h_sr
y_ai2r = (
-W_sr
* (h_sr * 0.5 + h_yr + R) ** 4
/ (R_sh * W_ssteel[0])
* (W_ssteel[1] * W_ssteel[4] / W_ssteel[3] - W_ssteel[2])
)
W_m = np.sin(phi) * mass_PM / (2 * np.pi * (R - h_m * 0.5))
y_ai3r = -W_m * (R - h_m) ** 4 / (R_sh * W_mag[0]) * (W_mag[1] * W_mag[4] / W_mag[3] - W_mag[2])
w_disc_r = rho_Fes * gravity * np.sin(phi) * t_r
a_ii = R + h_sr + h_yr
r_oii = R_sh
M_rb = (
-w_disc_r
* a_ii ** 2
/ W_ssteel[5]
* (W_ssteel[6] * 0.5 / (a_ii * R_sh) * (a_ii ** 2 - r_oii ** 2) - W_ssteel[8])
)
Q_b = w_disc_r * 0.5 / R_sh * (a_ii ** 2 - r_oii ** 2)
y_aiir = (
M_rb * a_ii ** 2 / W_ssteel[0] * W_ssteel[1]
+ Q_b * a_ii ** 3 / W_ssteel[0] * W_ssteel[2]
- w_disc_r * a_ii ** 4 / W_ssteel[0] * W_ssteel[7]
)
I = np.pi * 0.25 * (R ** 4 - (R_sh) ** 4)
F_ecc = q * 2 * np.pi * K_rad * rad_ag ** 3
M_ar = F_ecc * L_r * 0.5
y_ar = (
np.abs(y_ai1r + y_ai2r + y_ai3r)
+ y_aiir
+ (R + h_yr + h_sr) * inputs["theta_sh"]
            + M_ar * L_r ** 2 * 0 / (2 * E * I)  # NOTE: this bending-moment term is currently multiplied by zero (disabled)
)
y_allow_r = L_r / 100 * inputs["y_allow_pcent"]
# Torsional deformation of rotor
J_dr = 0.5 * np.pi * ((R + h_yr + h_sr) ** 4 - R_sh ** 4)
J_cylr = 0.5 * np.pi * ((R + h_yr + h_sr) ** 4 - R ** 4)
twist_r = 180 / np.pi * inputs["rated_torque"] / G * (t_r / J_dr + (L_r - t_r) / J_cylr)
Structural_mass_rotor = (
rho_Fes
* np.pi
* (((R + h_yr + h_sr) ** 2 - (R_sh) ** 2) * t_r + ((R + h_yr + h_sr) ** 2 - (R + h_yr) ** 2) * len_s)
)
TC1 = inputs["rated_torque"] / (2 * np.pi * sigma)
TC2r = (R + (h_yr + h_sr)) ** 2 * L_r
######################## Stator inactive (structural) design ###################################
# Radial deformation of Stator
L_stator = len_s + t_s + 0.1
R_stator = rad_ag - len_ag - h_t - h_ys - h_ss
constants_x_0 = shell_constant(R_stator, t_s, L_stator, 0, E, v)
constants_x_L = shell_constant(R_stator, t_s, L_stator, L_stator, E, v)
f_d_denom1 = (
R_stator / (E * ((R_stator) ** 2 - (R_no) ** 2)) * ((1 - v) * R_stator ** 2 + (1 + v) * (R_no) ** 2)
)
f_d_denom2 = (
t_s
/ (2 * constants_x_0[0] * (constants_x_0[1]) ** 3)
* (
constants_x_0[2] / (2 * constants_x_0[3]) * constants_x_0[4]
- constants_x_0[5] / constants_x_0[3] * constants_x_0[6]
- 0.5 * constants_x_0[7]
)
)
f = q * (R_stator) ** 2 * t_s / (E * (h_ys + h_ss) * (f_d_denom1 + f_d_denom2))
# TODO: Adds y_bd twice?
u_as = (
(q * (R_stator) ** 2) / (E * (h_ys + h_ss))
- f
* 0
/ (constants_x_L[0] * (constants_x_L[1]) ** 3)
* (
(
constants_x_L[2] / (2 * constants_x_L[3]) * constants_x_L[4]
- constants_x_L[5] / constants_x_L[3] * constants_x_L[6]
- 1 / 2 * constants_x_L[7]
)
)
+ y_bd
)
u_as = np.abs(u_as + y_bd)
u_allow_s = 2 * rad_ag / 1000 * inputs["u_allow_pcent"] / 100
# axial deformation of stator
W_back_iron = plate_constant(R_stator + h_ss + h_ys + h_t, R_no, E, v, 0.5 * h_ys + h_ss + R_stator, t_s)
W_ssteel = plate_constant(R_stator + h_ss + h_ys + h_t, R_no, E, v, R_stator + h_ss * 0.5, t_s)
W_active = plate_constant(R_stator + h_ss + h_ys + h_t, R_no, E, v, R_stator + h_ss + h_ys + h_t * 0.5, t_s)
W_is = rho_Fe * gravity * np.sin(phi) * (L_stator - t_s) * h_ys
y_ai1s = (
-W_is
* (0.5 * h_ys + R_stator) ** 4
/ (R_no * W_back_iron[0])
* (W_back_iron[1] * W_back_iron[4] / W_back_iron[3] - W_back_iron[2])
)
W_ss = rho_Fes * gravity * np.sin(phi) * (L_stator - t_s) * h_ss
y_ai2s = (
-W_ss
* (h_ss * 0.5 + h_ys + R_stator) ** 4
/ (R_no * W_ssteel[0])
* (W_ssteel[1] * W_ssteel[4] / W_ssteel[3] - W_ssteel[2])
)
W_cu = np.sin(phi) * Mass_tooth_stator / (2 * np.pi * (R_stator + h_ss + h_ys + h_t * 0.5))
y_ai3s = (
-W_cu
* (R_stator + h_ss + h_ys + h_t * 0.5) ** 4
/ (R_no * W_active[0])
* (W_active[1] * W_active[4] / W_active[3] - W_active[2])
)
w_disc_s = rho_Fes * gravity * | np.sin(phi) | numpy.sin |
"""
Author : <NAME>
01 October 2021
Hacktoberfest Mozilla Campus Club
Cummins College of Engineering for Women Pune
"""
import re
import numpy as np
#makes all the ones that are part of the same island 0
def remove_ones(x,y):
global r
global c
global grid
#check that indices x and y exist in grid
if (x<0 or x>=r or y<0 or y>=c):
return
if grid[x][y]==0:
return
#Marks the cell as 0
grid[x][y] = 0
#this function should keep calling itself till the entire island
#has been traversed and all the ones in it made to 0
#checking
#horizontal and vertical
remove_ones(x+1,y)
remove_ones(x-1,y)
remove_ones(x,y+1)
remove_ones(x,y-1)
#diagonal
remove_ones(x+1,y+1)
remove_ones(x+1,y-1)
remove_ones(x-1,y+1)
remove_ones(x-1,y-1)
def iterate():
count=0
    #this is a simple iterator that calls the remove_ones
#function for the first time for a particular island
for i in range(0,r):
for j in range(0,c):
if grid[i][j]==1:
count+=1
remove_ones(i,j)
#print(grid)
                ##uncomment above to visualize the islands being removed
return(count)
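#Illustrative example (not part of the original script): for the 2x4 grid
#{{1,1,0,0},{0,0,0,1}} the two adjacent ones form one island and the lone one
#in the second row forms another, so iterate() returns 2.
def demo_island_count():
    global grid, r, c
    grid = np.array([[1, 1, 0, 0],
                     [0, 0, 0, 1]])
    r, c = grid.shape
    return iterate()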
#the grid must be entered in the following format {{0,1},{1,0},{1,1},{1,0}}
s=input("grid = ")
#no of rows
r=s.count("{") - 1
#grid is initially a 1D numpy array
#the regex removes all the characters that are not 0 or 1
grid=np.array(list(re.sub(r"[^0-1]","",s)), dtype= int)
#reshape the grid
c = int(len(grid) / r)  # columns
grid = | np.reshape(grid, (r,c)) | numpy.reshape |
__author__ = 'sibirrer'
# this file contains a class to compute the Navarro-Frenk-White (NFW) profile in mass/kappa space
# the potential therefore is its integral
import numpy as np
class NFWt(object):
"""
this class contains functions concerning the truncated NFW profile with truncation function (t^2 / (r^2 + t^2))
detailed in Baltz et al 2008
    relations are: R_200 = c * Rs
"""
def function(self, x, y, Rs, theta_Rs, t, center_x=0, center_y=0):
"""
:param x: angular position
:param y: angular position
:param Rs: angular turn over point
:param theta_Rs: deflection at Rs
:param t: truncation radius (angular units)
:param center_x: center of halo
:param center_y: center of halo
:return:
"""
rho0_input = self._alpha2rho0(theta_Rs=theta_Rs, Rs=Rs)
if Rs < 0.0001:
Rs = 0.0001
x_ = x - center_x
y_ = y - center_y
R = np.sqrt(x_ ** 2 + y_ ** 2)
f_ = self.nfwPot(R, Rs, rho0_input, t)
return f_
def derivatives(self, x, y, Rs, theta_Rs, t, center_x=0, center_y=0):
"""
returns df/dx and df/dy of the function (integral of NFW)
"""
rho0_input = self._alpha2rho0(theta_Rs=theta_Rs, Rs=Rs)
if Rs < 0.0001:
Rs = 0.0001
x_ = x - center_x
y_ = y - center_y
R = np.sqrt(x_ ** 2 + y_ ** 2)
f_x, f_y = self.nfwAlpha(R, Rs, rho0_input, t, x_, y_)
return f_x, f_y
def hessian(self, x, y, Rs, theta_Rs, t, center_x=0, center_y=0):
"""
returns Hessian matrix of function d^2f/dx^2, d^f/dy^2, d^2/dxdy
"""
rho0_input = self._alpha2rho0(theta_Rs=theta_Rs, Rs=Rs)
if Rs < 0.0001:
Rs = 0.0001
x_ = x - center_x
y_ = y - center_y
R = np.sqrt(x_**2 + y_**2)
kappa = self.density_2d(R, 0, Rs, rho0_input, t)
gamma1, gamma2 = self.nfwGamma(R, Rs, rho0_input, t, x_, y_)
f_xx = kappa + gamma1
f_yy = kappa - gamma1
f_xy = gamma2
return f_xx, f_yy, f_xy
def density(self, R, Rs, rho0, t):
"""
        three-dimensional NFW profile
:param R: radius of interest
:type R: float/numpy array
:param Rs: scale radius
:type Rs: float
:param rho0: density normalization (characteristic density)
:type rho0: float
:param t: truncation radius (angular units)
:return: rho(R) density
"""
return rho0 / (R / Rs * (1 + R / Rs) ** 2) * (t**2*(R**2 + t**2)**-1)
def density_2d(self, x, y, Rs, rho0, t, center_x=0, center_y=0):
"""
        projected two-dimensional NFW profile (kappa*Sigma_crit)
        :param x: x-coordinate (same angular units as Rs)
        :param y: y-coordinate (same angular units as Rs)
        :param Rs: scale radius
        :type Rs: float
        :param rho0: density normalization (characteristic density)
        :type rho0: float
:param t: truncation radius (angular units)
:return: Epsilon(R) projected density at radius R
"""
x_ = x - center_x
y_ = y - center_y
R = np.sqrt(x_ ** 2 + y_ ** 2)
x = R / Rs
Fx = self._F(x,t)
return 2 * rho0 * Rs * Fx
def mass_3d(self, R, Rs, rho0, t):
"""
mass enclosed a 3d sphere or radius r
:param r:
:param Ra:
:param Rs:
:param t: truncation radius (angular units)
:return:
"""
raise ValueError('not yet implemented')
def mass_3d_lens(self, R, Rs, theta_Rs, t):
"""
mass enclosed a 3d sphere or radius r
:param r:
:param Ra:
:param Rs:
:param t: truncation radius (angular units)
:return:
"""
raise ValueError('not yet implemented')
def mass_2d(self, R, Rs, rho0, t):
"""
mass enclosed a 3d sphere or radius r
:param r:
:param Ra:
:param Rs:
:param t: truncation radius (angular units)
:return:
"""
raise ValueError('not yet implemented')
def nfw2D_smoothed(self, R, Rs, rho0, t, pixscale):
"""
        projected two-dimensional NFW profile with smoothing around the pixel scale
        this routine is meant to better compare outputs to N-body simulations (it is not meant for lens modelling)
        :param R: radius of interest
        :type R: float/numpy array
        :param Rs: scale radius
        :type Rs: float
        :param rho0: density normalization (characteristic density)
        :type rho0: float
:param pixscale: pixel scale (same units as R,Rs)
:type pixscale: float>0
:param t: truncation radius (angular units)
:return: Epsilon(R) projected density at radius R
"""
x = R / Rs
d = pixscale / (2 * Rs)
a = np.empty_like(x)
x_ = x[x > d]
upper = x_ + d
lower = x_ - d
a[x > d] = 4 * rho0 * Rs ** 3 * (self._g(upper,t) - self._g(lower,t)) / (2 * x_ * Rs * pixscale)
a[x < d] = 4 * rho0 * Rs ** 3 * self._g(d,t) / ((pixscale / 2) ** 2)
return a
def nfwPot(self, R, Rs, rho0, t):
"""
lensing potential of NFW profile (*Sigma_crit*D_OL**2)
:param R: radius of interest
:type R: float/numpy array
:param Rs: scale radius
:type Rs: float
:param rho0: density normalization (characteristic density)
:type rho0: float
        :param t: truncation radius (angular units)
        :return: lensing potential at radius R
"""
x = R / Rs
hx = self._h(x, t)
return 2 * rho0 * Rs ** 3 * hx
def nfwAlpha(self, R, Rs, rho0, t, ax_x, ax_y):
"""
        deflection angle of NFW profile (*Sigma_crit*D_OL) along the projection to coordinate "axis"
:param R: radius of interest
:type R: float/numpy array
:param Rs: scale radius
:type Rs: float
:param rho0: density normalization (characteristic density)
:type rho0: float
        :param t: truncation radius (angular units)
        :param ax_x: x-component of the position at which the deflection is evaluated
        :param ax_y: y-component of the position at which the deflection is evaluated
        :return: deflection angle components (alpha_x, alpha_y)
"""
if isinstance(R, int) or isinstance(R, float):
R = max(R, 0.00001)
else:
R[R <= 0.00001] = 0.00001
x = R / Rs
gx = self._g(x,t)
a = 4 * rho0 * Rs * R * gx / x ** 2 / R
return a * ax_x, a * ax_y
def nfwGamma(self, R, Rs, rho0, t, ax_x, ax_y):
"""
shear gamma of NFW profile (*Sigma_crit) along the projection to coordinate "axis"
:param R: radius of interest
:type R: float/numpy array
:param Rs: scale radius
:type Rs: float
:param rho0: density normalization (characteristic density)
:type rho0: float
        :param t: truncation radius (angular units)
        :param ax_x: x-component of the position at which the shear is evaluated
        :param ax_y: y-component of the position at which the shear is evaluated
        :return: shear components (gamma_1, gamma_2)
"""
c = 0.001
if isinstance(R, int) or isinstance(R, float):
R = max(R, c)
else:
R[R <= c] = c
x = R/Rs
gx = self._g(x,t)
Fx = self._F(x,t)
a = 2*rho0*Rs*(2*gx/x**2 - Fx)#/x #2*rho0*Rs*(2*gx/x**2 - Fx)*axis/x
return a*(ax_y**2-ax_x**2)/R**2, -a*2*(ax_x*ax_y)/R**2
def tau_deflection_factor(self, x, tau):
return tau ** 2 * (tau ** 2 + 1) ** -2 * (
(tau ** 2 + 1 + 2 * (x ** 2 - 1)) * self._func(x) + tau * np.pi + (tau ** 2 - 1) * np.log(tau) +
np.sqrt(tau ** 2 + x ** 2) * (-np.pi + self._Log(x, tau) * (tau ** 2 - 1) * tau ** -1))
def _func(self,x):
if isinstance(x, np.ndarray):
nfwvals = np.ones_like(x)
inds1 = np.where(x < 1)
inds2 = np.where(x > 1)
nfwvals[inds1] = (1 - x[inds1] ** 2) ** -.5 * np.arccosh(x[inds1]**-1)
nfwvals[inds2] = (x[inds2] ** 2 - 1) ** -.5 * np.arccos(x[inds2]**-1)
return nfwvals
elif isinstance(x, float) or isinstance(x, int):
if x == 1:
return 1
if x < 1:
return (1 - x ** 2) ** -.5 * np.arctanh((1 - x ** 2) ** .5)
else:
return (x ** 2 - 1) ** -.5 * np.arctan((x ** 2 - 1) ** .5)
def _F(self, X,t):
"""
analytic solution of the projection integral
:param x: R/Rs
:type x: float >0
"""
c = 0.001
t2 = t**2
t2p = (t2+1)**2
t2m = (t2-1)**2
if isinstance(X, int) or isinstance(X, float):
if X < 1:
x = max(c, X)
cos = np.arccosh(x ** -1)
F = cos * (1 - x ** 2) ** -.5
elif X == 1:
cos = 0
F = 1
else: # X > 1:
cos = np.arccos(X ** -1)
F = cos * (X ** 2 - 1) ** -.5
a = t2*t2p**-1*(t2p*(X**2-1)**-1 * (1-F) +2*F - np.pi*(t2+X**2)**-.5 +
t2m * self._Log(X,t)*(t*(t2+X**2)**.5)**-1)
else:
a = np.empty_like(X)
X[X < c] = c
x = X[X < 1]
cos = | np.arccosh(x ** -1) | numpy.arccosh |
"""
.. module:: model_fitter
:platform: Unix, Mac, Windows
:synopsis: Microlensing model fitter.
.. moduleauthor:: <NAME> <<EMAIL>>
.. moduleauthor:: <NAME> <<EMAIL>>
.. moduleauthor:: <NAME> <<EMAIL>>
.. moduleauthor:: <NAME>
"""
from pymultinest.solve import Solver
import os
from astropy.table.row import Row
import glob
import math
import numpy as np
import pylab as plt
import scipy.stats
import pymultinest
import src.BAGLE.model as mmodel
from astropy.table import Table
from astropy.table import Row
from astropy import units
from astropy.stats import sigma_clipped_stats
import json
from string import digits
import copy
import pdb
from datetime import date
import yaml
from dynesty import plotting as dyplot
from six.moves import range
import matplotlib.patches as mpatches
import logging
import types
from matplotlib.ticker import MaxNLocator, NullLocator
from matplotlib.colors import LinearSegmentedColormap, colorConverter
from matplotlib.ticker import ScalarFormatter
from scipy import spatial
from scipy.ndimage import gaussian_filter as norm_kde
from scipy.stats import gaussian_kde
import warnings
from dynesty.utils import resample_equal, unitcheck
from dynesty.utils import quantile as _quantile
import re
try:
str_type = types.StringTypes
float_type = types.FloatType
int_type = types.IntType
except:
str_type = str
float_type = float
int_type = int
muS_scale_factor = 100.0
# Global variable to define all array-style parameters (i.e. multiple filters).
multi_filt_params = ['b_sff', 'mag_src', 'mag_base', 'add_err', 'mult_err',
'mag_src_pri', 'mag_src_sec', 'fratio_bin',
'gp_log_sigma', 'gp_log_rho', 'gp_log_S0', 'gp_log_omega0', 'gp_rho',
'gp_log_omega04_S0', 'gp_log_omega0', 'add_err', 'mult_err']
class PSPL_Solver(Solver):
"""
A PyMultiNest solver to find the optimal PSPL parameters, given data and
a microlensing model from model.py.
DESPITE THE NAME YOU CAN ALSO USE IT TO FIT PSBL!
Attributes
-----------
data : dictionary
Observational data used to fit a microlensing model. What the data must
contain depends on what type of microlensing model you are solving for.
        The data dictionary must always contain photometry information of at least one
filter. This data must contain the times, magnitudes, and magnitude
errors of the observations. The keys to these arrays are:
* `t_phot1` (MJD)
* `mag1` (magnitudes)
* `mag_err1` (magnitudes)
PSPL_Solver supports multiple photometric filters. For each
        additional filter, increment the extension of the above keys by one.
For example, a second filter would be:
* `t_phot2` (MJD)
* `mag2` (magnitudes)
* `mag_err2` (magnitudes)
PSPL_Solver supports solving microlensing models that calculate with
parallax. These models must be accompanied with data that contains the
        right ascension and declination of the target. These keys are:
* `raL` (decimal degrees)
* `decL` (decimal degrees)
PSPL_Solver supports solving microlensing models that fit astrometry.
These models must be accompanied with data that contains astrometric
observations in the following keys:
* `t_ast` (MJD)
* `xpos` (arcsec along East-West increasing to the East)
* `ypos` (arcsec along the North-South increasing to the North)
* `xpos_err` (arcsec)
* `ypos_err` (arcsec)
model_class :
PSPL_Solver must be provided with the microlensing model that you are
trying to fit to your data. These models are written out in model.py,
along with extensive documentation as to their content and
construction in the file's docstring. The model can support either
1. photometric data or photometric and astrometric data,
2. parallax or no parallax, and
3. different parameterizations of the model.
For example, a model with accepts both astrometric and photometric
data, uses parallax, and uses a parameterization that includes the
distance to the source and the lens is: `PSPL_PhotAstrom_Par_Param1`.
custom_additional_param_names : list, optional
If provided, the fitter will override the default
`additional_param_names` of the model_class. These are the parameters,
besides those that are being fitted for, that are written out to disk
for posterior plotting after the fit has completed. To see the default
additional_param_names run:
        `print(model_class.additional_param_names)`
add_error_on_photometry : boolean, optional
If set to True, the fitter will fit for an additive error to the
photometric magnitudes in the fitting process. This error will have
the name `add_errN`, with an `N` equal to the filter number.
multiply_error_on_photometry : boolean, optional
If set to True, the fitter will fit for a multiplicative error to the
photometric magnitudes in the fitting process. This error will have
the name `mult_errN`, with an `N` equal to the filter number.
All other parameters :
See pymultinest.run() for a description of all other parameters.
Examples
-------------------
Assuming that a data dictionary has been instantiated with the above keys,
and that a model has been loaded in from model.py, PSPL_Solver can be run
with the following commands:
.. code::
fitter = PSPL_Solver(data,
PSPL_PhotAstrom_Par_Param1,
add_error_on_photometry=True,
custom_additional_param_names=['dS', 'tE'],
outputfiles_basename='./model_output/test_')
fitter.solve()
"""
default_priors = {
'mL': ('make_gen', 0, 100),
't0': ('make_t0_gen', None, None),
't0_prim': ('make_t0_gen', None, None),
'xS0_E': ('make_xS0_gen', None, None),
'xS0_N': ('make_xS0_gen', None, None),
'u0_amp': ('make_gen', -1, 1),
'u0_amp_prim': ('make_gen', -1, 1),
'beta': ('make_gen', -2, 2),
'muL_E': ('make_gen', -20, 20),
'muL_N': ('make_gen', -20, 20),
'muS_E': ('make_muS_EN_gen', None, None),
'muS_N': ('make_muS_EN_gen', None, None),
'dL': ('make_gen', 1000, 8000),
'dS': ('make_gen', 100, 10000),
'dL_dS': ('make_gen', 0.01, 0.99),
'b_sff': ('make_gen', 0.0, 1.5),
'mag_src': ('make_mag_src_gen', None, None),
'mag_src_pri': ('make_mag_src_gen', None, None),
'mag_src_sec': ('make_mag_src_gen', None, None),
'mag_base': ('make_mag_base_gen', None, None),
'tE': ('make_gen', 1, 400),
'piE_E': ('make_gen', -1, 1),
'piE_N': ('make_gen', -1, 1),
'piEN_piEE' : ('make_gen', -10, 10),
'thetaE': ('make_lognorm_gen', 0, 1),
'log10_thetaE': ('make_truncnorm_gen', -0.2, 0.3, -4, 4),
'q': ('make_gen', 0.001, 1),
'alpha': ('make_gen', 0, 360),
'phi': ('make_gen', 0, 360),
'sep': ('make_gen', 1e-4, 2e-2),
'piS': ('make_piS', None, None),
'add_err': ('make_gen', 0, 0.3),
'mult_err': ('make_gen', 1.0, 3.0),
'radius': ('make_gen', 1E-4, 1E-2),
'fratio_bin': ('make_gen', 0, 1),
# We really need to make some normal distributions. All these are junk right now.
'gp_log_rho': ('make_norm_gen', 0, 5),
'gp_log_S0': ('make_norm_gen', 0, 5),
'gp_log_sigma': ('make_norm_gen', 0, 5),
'gp_rho':('make_invgamma_gen', None, None),
'gp_log_omega04_S0':('make_norm_gen', 0, 5), # FIX... get from data
'gp_log_omega0':('make_norm_gen', 0, 5)
}
def __init__(self, data, model_class,
custom_additional_param_names=None,
add_error_on_photometry=False,
multiply_error_on_photometry=False,
use_phot_optional_params=True,
use_ast_optional_params=True,
wrapped_params=None,
importance_nested_sampling=False,
multimodal=True, const_efficiency_mode=False,
n_live_points=300,
evidence_tolerance=0.5, sampling_efficiency=0.8,
n_iter_before_update=100, null_log_evidence=-1e90,
max_modes=100, mode_tolerance=-1e90,
outputfiles_basename="chains/1-", seed=-1, verbose=False,
resume=False, context=0, write_output=True, log_zero=-1e100,
max_iter=0, init_MPI=False, dump_callback=None):
"""
Accepted optional inputs are the same as on pymultinest.run().
        Note that prior distributions are defined upon initialization and
can be modified on the object before running solve().
Parameters
---------------
use_phot_optional_params : bool, or list of bools, optional
            Whether to fit the model's optional photometric parameters; a list of bools applies the choice per filter.
"""
# Set the data, model, and error modes
self.data = data
self.model_class = model_class
self.add_error_on_photometry = add_error_on_photometry
self.multiply_error_on_photometry = multiply_error_on_photometry
self.use_phot_optional_params = use_phot_optional_params
self.use_ast_optional_params = use_ast_optional_params
# Check the data
self.check_data()
# list of all possible multi-filt, multi-phot, multi-ast parameters that anyone
# could ever possibly use.
self.multi_filt_params = multi_filt_params
self.gp_params = ['gp_log_sigma', 'gp_log_rho', 'gp_log_S0', 'gp_log_omega0', 'gp_rho',
'gp_log_omega04_S0', 'gp_log_omega0']
# Set up parameterization of the model
self.remove_digits = str.maketrans('', '', digits) # removes nums from strings
self.custom_additional_param_names = custom_additional_param_names
self.n_phot_sets = None
self.n_ast_sets = None
self.fitter_param_names = None
self.additional_param_names = None
self.all_param_names = None
self.n_dims = None
self.n_params = None
self.n_clustering_params = None
self.setup_params()
# Set multinest stuff
self.multimodal = multimodal
self.wrapped_params = wrapped_params
self.importance_nested_sampling = importance_nested_sampling
self.const_efficiency_mode = const_efficiency_mode
self.n_live_points = n_live_points
self.evidence_tolerance = evidence_tolerance
self.sampling_efficiency = sampling_efficiency
self.n_iter_before_update = n_iter_before_update
self.null_log_evidence = null_log_evidence
self.max_modes = max_modes
self.mode_tolerance = mode_tolerance
self.outputfiles_basename = outputfiles_basename
self.seed = seed
self.verbose = verbose
self.resume = resume
self.context = context
self.write_output = write_output
self.log_zero = log_zero
self.max_iter = max_iter
self.init_MPI = init_MPI
if dump_callback is None:
self.dump_callback = self.callback_plotter
else:
self.dump_callback = dump_callback
# Setup the default priors
self.priors = None
# self.priors = {}
self.make_default_priors()
# Stuff needed for using multinest posteriors as priors.
self.post_param_cdf = None
self.post_param_names = None
self.post_param_bininds = None
self.post_param_bins = None
# Make the output directory if doesn't exist
if os.path.dirname(outputfiles_basename) != '':
os.makedirs(os.path.dirname(outputfiles_basename), exist_ok=True)
return
def check_data(self):
if 't_ast1' in self.data.keys():
if not self.model_class.paramAstromFlag or \
not self.model_class.astrometryFlag:
print('***** WARNING: ASTROMETRY DATA WILL NOT BE FIT '
'BY %s *****' % str(self.model_class))
else:
if self.model_class.paramAstromFlag or \
self.model_class.astrometryFlag:
raise RuntimeError('Astrometry data required to '
'run %s' % str(self.model_class))
if 't_phot1' in self.data.keys():
if not self.model_class.paramPhotFlag or \
not self.model_class.photometryFlag:
print('***** WARNING: PHOTOMETRY DATA WILL NOT BE FIT '
'BY %s *****' % str(self.model_class))
else:
if self.model_class.paramPhotFlag or \
self.model_class.photometryFlag:
raise RuntimeError('Photometry data required to '
'run %s' % str(self.model_class))
def setup_params(self):
# Number of photometry sets
n_phot_sets = 0
# Number of astrometry sets
n_ast_sets = 0
phot_params = []
ast_params = []
# The indices in map_phot_idx_to_ast_idx map phot to astrom
# map_phot_idx_to_ast_idx <--> [0, 1, 2, ... len(map_phot_idx_to_ast_idx)-1]
map_phot_idx_to_ast_idx = []
for key in self.data.keys():
if 't_phot' in key and (self.model_class.paramPhotFlag or self.model_class.photometryFlag):
n_phot_sets += 1
# Photometry parameters
for phot_name in self.model_class.phot_param_names:
phot_params.append(phot_name + str(n_phot_sets))
# Optional photometric parameters -- not all filters
for opt_phot_name in self.model_class.phot_optional_param_names:
if isinstance(self.use_phot_optional_params, (list, np.ndarray)):
if self.use_phot_optional_params[n_phot_sets-1]:
phot_params.append(opt_phot_name + str(n_phot_sets))
# Case: single value -- set for all filters.
else:
if self.use_phot_optional_params:
phot_params.append(opt_phot_name + str(n_phot_sets))
else:
msg = 'WARNING: Your model supports optional photometric parameters; '
msg += 'but you have disabled them for all filters. '
msg += 'Consider using a simpler model instead.'
print(msg)
# Additive error parameters (not on the model) -- not all filters
if self.add_error_on_photometry:
# Case: List -- control additive error on each filter.
if isinstance(self.add_error_on_photometry, (list, np.ndarray)):
if self.add_error_on_photometry[n_phot_sets-1]:
phot_params.append('add_err' + str(n_phot_sets))
# Case: single value -- set for all filters.
else:
phot_params.append('add_err' + str(n_phot_sets))
# Multiplicative error parameters (not on the model) -- not all filters
if self.multiply_error_on_photometry:
# Case: List -- control additive error on each filter.
if isinstance(self.multiply_error_on_photometry, (list, np.ndarray)):
if self.multiply_error_on_photometry[n_phot_sets-1]:
phot_params.append('mult_err' + str(n_phot_sets))
# Case: single value -- set for all filters.
else:
phot_params.append('mult_err' + str(n_phot_sets))
if 't_ast' in key and (self.model_class.paramAstromFlag or self.model_class.astrometryFlag):
n_ast_sets += 1
# Optional astrometric parameters -- not all filters
for opt_ast_name in self.model_class.ast_optional_param_names:
if isinstance(self.use_ast_optional_params, (list, np.ndarray)):
if self.use_ast_optional_params[n_ast_sets-1]:
ast_params.append(opt_ast_name + str(n_ast_sets))
# Case: single value -- set for all filters.
else:
if self.use_ast_optional_params:
ast_params.append(opt_ast_name + str(n_ast_sets))
else:
msg = 'WARNING: Your model supports optional astrometric parameters; '
msg += 'but you have disabled them for all filters. '
msg += 'Consider using a simpler model instead.'
print(msg)
# The indices in map_phot_idx_to_ast_idx map phot to astrom
# map_phot_idx_to_ast_idx <--> [0, 1, 2, ... len(map_phot_idx_to_ast_idx)-1]
if n_ast_sets > 0 and n_phot_sets > 0:
for aa in self.data['ast_data']:
try:
idx = self.data['phot_data'].index(aa)
map_phot_idx_to_ast_idx.append(idx)
except ValueError:
print('*** CHECK YOUR INPUT! All astrometry data must have a corresponding photometry data set! ***')
raise
self.n_phot_sets = n_phot_sets
self.n_ast_sets = n_ast_sets
self.map_phot_idx_to_ast_idx = map_phot_idx_to_ast_idx
self.fitter_param_names = self.model_class.fitter_param_names + \
phot_params + ast_params
if self.custom_additional_param_names is not None:
self.additional_param_names = []
for cc, param_name in enumerate(self.custom_additional_param_names):
if param_name in self.multi_filt_params:
# Special handling for gp params
if param_name in self.gp_params:
if self.use_phot_optional_params is True:
for ff in range(n_phot_sets):
self.additional_param_names += [param_name + str(ff+1)]
elif self.use_phot_optional_params is False:
continue
else:
for ii, use in enumerate(self.use_phot_optional_params):
if use:
self.additional_param_names += [param_name + str(ii+1)]
else:
self.additional_param_names += [param_name]
else:
self.additional_param_names = []
for i, param_name in enumerate(self.model_class.additional_param_names):
if param_name in self.multi_filt_params:
# Special handling for gp params
if param_name in self.gp_params:
if self.use_phot_optional_params is True:
for nn in range(self.n_phot_sets):
self.additional_param_names += [param_name + str(nn+1)]
elif self.use_phot_optional_params is False:
continue
else:
for ii, use in enumerate(self.use_phot_optional_params):
if use:
self.additional_param_names += [param_name + str(ii+1)]
else:
self.additional_param_names += [param_name]
self.all_param_names = self.fitter_param_names + self.additional_param_names
self.n_dims = len(self.fitter_param_names)
self.n_params = len(self.all_param_names) # cube dimensions
self.n_clustering_params = self.n_dims
def make_default_priors(self):
"""
Setup our prior distributions (i.e. random samplers). We will
draw from these in the Prior() function. We set them up in advance
because they depend on properties of the data. Also,
they can be over-written by custom priors as desired.
To make your own custom priors, use the make_gen() functions
with different limits.
"""
# if os.path.exists("u0.txt"):
# os.remove("u0.txt")
#
# if os.path.exists("piEE.txt"):
# os.remove("piEE.txt")
#
# if os.path.exists("piEN.txt"):
# os.remove("piEN.txt")
self.priors = {}
for param_name in self.fitter_param_names:
if any(x in param_name for x in self.multi_filt_params):
priors_name, filt_index = split_param_filter_index1(param_name)
else:
priors_name = param_name
filt_index = None
# FIXME: can we write the code so it doesn't require the prior to exist here?
foo = self.default_priors[priors_name]
prior_type = foo[0]
if prior_type == 'make_gen':
prior_min = foo[1]
prior_max = foo[2]
self.priors[param_name] = make_gen(prior_min, prior_max)
if prior_type == 'make_norm_gen':
prior_mean = foo[1]
prior_std = foo[2]
self.priors[param_name] = make_norm_gen(prior_mean, prior_std)
if prior_type == 'make_lognorm_gen':
prior_mean = foo[1]
prior_std = foo[2]
self.priors[param_name] = make_lognorm_gen(prior_mean, prior_std)
if prior_type == 'make_truncnorm_gen':
prior_mean = foo[1]
prior_std = foo[2]
prior_lo_cut = foo[3]
prior_hi_cut = foo[4]
self.priors[param_name] = make_truncnorm_gen(prior_mean, prior_std, prior_lo_cut, prior_hi_cut)
if prior_type == 'make_invgamma_gen':
n_digits = len(param_name) - len(priors_name)
# Get the right indices.
num = int(param_name[-n_digits:])
self.priors[param_name] = make_invgamma_gen(self.data['t_phot' + str(num)])
elif prior_type == 'make_t0_gen':
# Hard-coded to use the first data set to set the t0 prior.
self.priors[param_name] = make_t0_gen(self.data['t_phot1'],
self.data['mag1'])
elif prior_type == 'make_xS0_gen':
if param_name == 'xS0_E':
pos = self.data['xpos1']
elif param_name == 'xS0_N':
pos = self.data['ypos1']
self.priors[param_name] = make_xS0_gen(pos)
elif prior_type == 'make_muS_EN_gen':
if param_name == 'muS_E':
pos = self.data['xpos1']
elif param_name == 'muS_N':
pos = self.data['ypos1']
self.priors[param_name] = make_muS_EN_gen(self.data['t_ast1'],
pos,
scale_factor=muS_scale_factor)
elif prior_type == 'make_piS':
self.priors[param_name] = make_piS()
elif prior_type == 'make_fdfdt':
self.priors[param_name] = make_fdfdt()
elif prior_type == 'make_mag_base_gen':
self.priors[param_name] = make_mag_base_gen(self.data['mag' + str(filt_index)])
return
def get_model(self, params):
if self.model_class.parallaxFlag:
raL, decL = self.data['raL'], self.data['decL']
else:
raL, decL = None, None
params_dict = generate_params_dict(params,
self.fitter_param_names)
mod = self.model_class(*params_dict.values(),
raL=raL,
decL=decL)
# FIXME: Why are we updating params here???
if not isinstance(params, (dict, Row)):
# FIXME: is there better way to do this.
for i, param_name in enumerate(self.additional_param_names):
filt_name, filt_idx = split_param_filter_index1(param_name)
                if filt_idx is None:  # Not a multi-filter parameter.
params[self.n_dims + i] = getattr(mod, param_name)
else:
params[self.n_dims + i] = getattr(mod, filt_name)[filt_idx-1]
return mod
# FIXME: Is there a reason Prior takes ndim and nparams when those aren't used?
# Is it the same reason as LogLikelihood?
def Prior(self, cube, ndim=None, nparams=None):
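"""
Map a unit-cube sample from MultiNest onto physical parameter values by
passing each coordinate through the inverse CDF (ppf) of the
corresponding 1-D prior.
"""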
for i, param_name in enumerate(self.fitter_param_names):
cube[i] = self.priors[param_name].ppf(cube[i])
return cube
def Prior_copy(self, cube):
cube_copy = cube.copy()
for i, param_name in enumerate(self.fitter_param_names):
cube_copy[i] = self.priors[param_name].ppf(cube[i])
# Append on additional parameters.
add_params = np.zeros(len(self.additional_param_names), dtype='float')
cube_copy = np.append(cube_copy, add_params)
# Strangely, get_model does the parameter updating for the additional parameters.
# This should really be elsewhere FIXME.
model = self.get_model(cube_copy)
return cube_copy
# FIXME: I pass in ndim and nparams since that's what's done in Prior, but I don't think they're necessary?
def Prior_from_post(self, cube, ndim=None, nparams=None):
"""Get the bin midpoints
"""
binmids = []
for bb in np.arange(len(self.post_param_bins)):
binmids.append((self.post_param_bins[bb][:-1] + self.post_param_bins[bb][1:])/2)
# Draw a random sample from the posteriors.
post_params = self.sample_post(binmids, self.post_param_cdf, self.post_param_bininds)
# Make the cube by combining the posterior draws and the 1-D priors.
for i, param_name in enumerate(self.fitter_param_names):
if param_name in self.post_param_names:
pdx = self.post_param_names.index(param_name)
cube[i] = post_params[pdx]
else:
cube[i] = self.priors[param_name].ppf(cube[i])
return cube
def sample_post(self, binmids, cdf, bininds):
"""Randomly sample from a multinest posterior distribution.
Notation: Nparams = number of parameters, Nbins = number of histogram
bins per dimension, Nnzero = number of histogram bins with non-zero
probability.
Parameters
----------
binmids : list of length Nparams, each entry an array of shape (Nbins, )
The centers of the bins for each parameter
cdf : (Nnzero, ) array
CDF of the distribution. Only the non-zero probability entries.
bininds : (Nnzero, Nparams) array
Histogram indices of the non-zero probability entries.
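Examples
--------
A minimal one-parameter sketch (all array values are illustrative)::

    binmids = [np.array([0.5, 1.5, 2.5])]  # bin centers for the single parameter
    cdf = np.array([0.2, 0.7, 1.0])        # CDF over the non-zero bins
    bininds = np.array([[0], [1], [2]])    # histogram index of each entry
    pars = fitter.sample_post(binmids, cdf, bininds)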
"""
# Make a random sample from the posterior using inverse transform sampling.
rr = np.random.uniform()
if len(np.where(cdf > rr)[0]) == 0:
idx = 0
else:
idx = np.min(np.where(cdf > rr)[0])
# Get the random sample.
Npars = len(bininds[0])
pars = np.empty(len(bininds[0]), dtype=float)
for i in range(Npars):
pars[i] = binmids[i][int(bininds[idx,i])]
# Sample randomly within the bin width, so not just discrete points.
pars[i] += np.random.uniform() * (binmids[i][1] - binmids[i][0])
return pars
def LogLikelihood(self, cube, ndim=None, n_params=None):
"""This is just a wrapper because PyMultinest requires passing in
the ndim and nparams.
"""
lnL = self.log_likely(cube, verbose=self.verbose)
# lnL = self.log_likely0(cube, verbose=self.verbose)
return lnL
def dyn_prior(self, cube):
for i, param_name in enumerate(self.fitter_param_names):
cube[i] = self.priors[param_name].ppf(cube[i])
return cube
def dyn_log_likely(self, cube):
lnL = self.log_likely(cube, verbose=self.verbose)
return lnL
def log_likely_astrometry(self, model):
if model.astrometryFlag:
lnL_ast = 0.0
# If no photometry
if len(self.map_phot_idx_to_ast_idx) == 0:
for i in range(self.n_ast_sets):
lnL_ast_i = model.log_likely_astrometry(self.data['t_ast' + str(i+1)],
self.data['xpos' + str(i+1)],
self.data['ypos' + str(i+1)],
self.data['xpos_err' + str(i+1)],
self.data['ypos_err' + str(i+1)],
ast_filt_idx = i)
lnL_ast += lnL_ast_i.sum()
# If photometry
else:
for i in range(self.n_ast_sets):
lnL_ast_i = model.log_likely_astrometry(self.data['t_ast' + str(i+1)],
self.data['xpos' + str(i+1)],
self.data['ypos' + str(i+1)],
self.data['xpos_err' + str(i+1)],
self.data['ypos_err' + str(i+1)],
ast_filt_idx = self.map_phot_idx_to_ast_idx[i])
lnL_ast += lnL_ast_i.sum()
else:
lnL_ast = 0
return lnL_ast
def log_likely_photometry(self, model, cube):
if model.photometryFlag:
lnL_phot = 0.0
for i in range(self.n_phot_sets):
t_phot = self.data['t_phot' + str(i + 1)]
mag = self.data['mag' + str(i + 1)]
# additive or multiplicative error
mag_err = self.get_modified_mag_err(cube, i)
lnL_phot += model.log_likely_photometry(t_phot, mag, mag_err, i)
else:
lnL_phot = 0
return lnL_phot
def log_likely(self, cube, verbose=False):
"""
Parameters
--------------
cube : list or dict
The dictionary or cube of the model parameters.
"""
model = self.get_model(cube)
# # Useful for debugging the parallax cache.
# def get_cache_size():
# """Print out the cache size"""
# cache_file = mmodel.cache_dir + '/joblib/microlens/jlu/model/parallax_in_direction/'
#
# size = 0
# for path, dirs, files in os.walk(cache_file):
# for f in files:
# fp = os.path.join(path, f)
# size += os.path.getsize(fp)
#
# return size
#
# print(f'Cache size = {get_cache_size()}')
lnL_phot = self.log_likely_photometry(model, cube)
lnL_ast = self.log_likely_astrometry(model)
lnL = lnL_phot + lnL_ast
if verbose:
self.plot_model_and_data(model)
fmt = '{0:13s} = {1:f} '
for ff in range(self.n_params):
if isinstance(cube, dict) or isinstance(cube, Row):
pname = self.all_param_names[ff]
if ((isinstance(cube, dict) and pname in cube) or
(isinstance(cube, Row) and pname in cube.colnames)):
print(fmt.format(pname, cube[pname])),
else:
print(fmt.format(pname, -999.0)),
else:
print(fmt.format(self.all_param_names[ff], cube[ff])),
print(fmt.format('lnL_phot', lnL_phot)),
print(fmt.format('lnL_ast', lnL_ast)),
print(fmt.format('lnL', lnL))
# pdb.set_trace()
return lnL
def callback_plotter(self, nSamples, nlive, nPar,
physLive, posterior, stats, maxLogLike, logZ, logZerr, foo):
# ideally this should work; but it looks like
# it has been mangled by multinest.
# p_mean = stats[0]
# p_std = stats[1]
# p_best = stats[2]
# p_map = stats[3]
# p_best = posterior.mean(axis=0)[0:-2]
bdx = np.argmax(physLive[:, -1])
p_best = physLive[bdx, 0:-1]
print('')
print('UPDATE: Current MaxLogLike = ', physLive[bdx, -1])
print('')
model = self.get_model(p_best)
self.plot_model_and_data(model)
return
# Code for randomly sampling prior
# def log_likely0(self, cube, verbose=False):
# """
# Parameters
# _____________
# cube : list or dict
# The dictionary or cube of the model parameters.
# """
# model = self.get_model(cube)
#
# with open("u0.txt", "a") as f:
# t = cube[1]
# f.write(str(t) + '\n')
#
# with open("piEE.txt", "a") as f:
# t = cube[5]
# f.write(str(t) + '\n')
#
# with open("piEN.txt", "a") as f:
# t = cube[6]
# f.write(str(t) + '\n')
#
# return -1
def get_modified_mag_err(self, cube, filt_index):
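"""
Return the photometric errors for data set `filt_index`, modified by the
optional additive ('add_errN', added in quadrature) and/or multiplicative
('mult_errN') error parameters in `cube`, when those parameters are being fit.
"""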
mag_err = copy.deepcopy(self.data['mag_err' + str(filt_index + 1)])
if self.add_error_on_photometry:
add_err_name = 'add_err' + str(filt_index + 1)
if isinstance(cube, dict) or isinstance(cube, Row):
add_err = cube[add_err_name]
else:
add_err_idx = self.all_param_names.index(add_err_name)
add_err = cube[add_err_idx]
mag_err = np.hypot(mag_err, add_err)
if self.multiply_error_on_photometry:
mult_err_name = 'mult_err' + str(filt_index + 1)
if isinstance(cube, dict) or isinstance(cube, Row):
mult_err = cube[mult_err_name]
else:
mult_err_idx = self.all_param_names.index(mult_err_name)
mult_err = cube[mult_err_idx]
mag_err *= mult_err
return mag_err
def write_params_yaml(self):
"""
Write a YAML file that contains the parameters to re-initialize
this object, if desired.
"""
params = {}
params['target'] = self.data['target']
params['phot_data'] = self.data['phot_data']
params['phot_files'] = self.data['phot_files']
params['astrom_data'] = self.data['ast_data']
params['astrom_files'] = self.data['ast_files']
params['add_error_on_photometry'] = self.add_error_on_photometry
params['multiply_error_on_photometry'] = self.multiply_error_on_photometry
params['use_phot_optional_params'] = self.use_phot_optional_params
params['use_ast_optional_params'] = self.use_ast_optional_params
params['model'] = self.model_class.__name__
params['custom_additional_param_names'] = self.custom_additional_param_names
params['wrapped_params'] = self.wrapped_params
params['run_date'] = str(date.today())
with open(self.outputfiles_basename + 'params.yaml', 'w') as f:
foo = yaml.dump(params, f)
return
def solve(self):
"""
Run a MultiNest fit to find the optimal parameters (and their
posteriors) given the data.
Note we will ALWAYS tell multinest to be verbose.
"""
self.write_params_yaml()
# Choose whether to use self.Prior or self.Prior_from_post depending
# on whether self.post_param_names is none or not.
use_prior = None
if self.post_param_cdf is not None:
use_prior = self.Prior_from_post
else:
use_prior = self.Prior
print('*************************************************')
print('*** Using', use_prior.__name__, 'for prior function. ***')
print('*************************************************')
pymultinest.run(self.LogLikelihood, use_prior, self.n_dims,
n_params=self.n_params,
n_clustering_params=self.n_clustering_params,
multimodal=self.multimodal,
importance_nested_sampling=self.importance_nested_sampling,
wrapped_params=self.wrapped_params,
const_efficiency_mode=self.const_efficiency_mode,
n_live_points=self.n_live_points,
evidence_tolerance=self.evidence_tolerance,
sampling_efficiency=self.sampling_efficiency,
n_iter_before_update=self.n_iter_before_update,
null_log_evidence=self.null_log_evidence,
max_modes=self.max_modes,
mode_tolerance=self.mode_tolerance,
outputfiles_basename=self.outputfiles_basename,
seed=self.seed,
# verbose=self.verbose,
verbose=True,
resume=self.resume,
context=self.context,
write_output=self.write_output,
log_zero=self.log_zero,
max_iter=self.max_iter,
init_MPI=self.init_MPI,
dump_callback=self.dump_callback)
return
def separate_modes(self):
"""
Reads in the fits for the different modes (post_separate.dat)
and splits it into a .dat file per mode.
Is there a more intelligent way to deal with all the indices???
Write better later, but it seems to work for now...
"""
mode_file = self.outputfiles_basename + 'post_separate.dat'
# Search for the empty lines (these separate the different modes)
empty_lines = []
with open(mode_file, 'r') as orig_file:
for num, line in enumerate(orig_file, start=0):
if line == '\n':
empty_lines.append(num)
# Error checking
if len(empty_lines) % 2 != 0:
print('SOMETHING BAD HAPPENED!')
# Figure out how many modes there are (# modes = idx_range)
idx_range = int(len(empty_lines) / 2)
# Split into the different files
orig_tab = np.loadtxt(mode_file)
for idx in np.arange(idx_range):
start_idx = empty_lines[idx * 2 + 1] + 1 - 2 * (idx + 1)
if idx != np.arange(idx_range)[-1]:
end_idx = empty_lines[idx * 2 + 2] - 2 * (idx + 1)
np.savetxt(
self.outputfiles_basename + 'mode' + str(idx) + '.dat',
orig_tab[start_idx:end_idx])
else:
np.savetxt(
self.outputfiles_basename + 'mode' + str(idx) + '.dat',
orig_tab[start_idx:])
return
def calc_best_fit(self, tab, smy, s_idx=0, def_best='maxl'):
"""Returns best-fit parameters, where best-fit can be
median, maxl, or MAP. Default is maxl.
If best-fit is median, then also return +/- 1 sigma
uncertainties.
If best-fit is MAP, then also need to indicate which row of
summary table to use. Default is `s_idx = 0` (global solution).
`s_idx = 1, 2, ... , n` for the n different modes.
`tab = self.load_mnest_results()`
`smy = self.load_mnest_summary()`
"""
params = self.all_param_names
# Use Maximum Likelihood solution
if def_best.lower() == 'maxl':
best = np.argmax(tab['logLike'])
tab_best = tab[best][params]
return tab_best
# Use MAP solution
if def_best.lower() == 'map':
# tab_best = {}
# for n in params:
# if (n != 'weights' and n != 'logLike'):
# tab_best[n] = smy['MAP_' + n][s_idx]
# Recalculate ourselves. No dependence on smy.
best = np.argmax(tab['weights'])
tab_best = tab[best][params]
return tab_best
# Use mean solution
if def_best.lower() == 'mean':
tab_best = {}
tab_errors = {}
for n in params:
if (n != 'weights' and n != 'logLike'):
tab_best[n] = np.mean(tab[n])
tab_errors[n] = np.std(tab[n])
return tab_best, tab_errors
# Use median solution
if def_best.lower() == 'median':
tab_best = {}
med_errors = {}
sumweights = np.sum(tab['weights'])
weights = tab['weights'] / sumweights
sig1 = 0.682689
sig2 = 0.9545
sig3 = 0.9973
sig1_lo = (1. - sig1) / 2.
sig2_lo = (1. - sig2) / 2.
sig3_lo = (1. - sig3) / 2.
sig1_hi = 1. - sig1_lo
sig2_hi = 1. - sig2_lo
sig3_hi = 1. - sig3_lo
for n in params:
# Calculate median, 1 sigma lo, and 1 sigma hi credible interval.
tmp = weighted_quantile(tab[n], [0.5, sig1_lo, sig1_hi],
sample_weight=weights)
tab_best[n] = tmp[0]
# Switch from values to errors.
err_lo = tmp[0] - tmp[1]
err_hi = tmp[2] - tmp[0]
med_errors[n] = np.array([err_lo, err_hi])
return tab_best, med_errors
def get_best_fit(self, def_best='maxl'):
"""Returns best-fit parameters, where best-fit can be
median, maxl, or MAP. Default is maxl.
If best-fit is median, then also return +/- 1 sigma
uncertainties.
`tab = self.load_mnest_results()`
`smy = self.load_mnest_summary()`
"""
tab = self.load_mnest_results()
smy = self.load_mnest_summary()
best_fit = self.calc_best_fit(tab=tab, smy=smy, s_idx=0,
def_best=def_best)
return best_fit
def get_best_fit_modes(self, def_best='maxl'):
"""Identify best-fit model
"""
tab_list = self.load_mnest_modes()
smy = self.load_mnest_summary()
best_fit_list = []
# Note on indexing: row 0 of the summary table is the global solution and
# rows 1..n are the individual modes, so modes are indexed from 1 here
# (hence enumerate(tab_list, 1) and s_idx=ii).
for ii, tab in enumerate(tab_list, 1):
best_fit = self.calc_best_fit(tab=tab, smy=smy, s_idx=ii,
def_best=def_best)
# best_fit_list.append(best_fit[0])
best_fit_list.append(best_fit)
return best_fit_list
def get_best_fit_model(self, def_best='maxl'):
"""Identify best-fit model
Parameters
-----------
def_best : str
Choices are 'map' (maximum a posteriori), 'median', 'mean', or
'maxl' (maximum likelihood)
"""
best = self.get_best_fit(def_best=def_best)
if ((def_best == 'median') or (def_best == 'mean')):
pspl_mod = self.get_model(best[0])
else:
pspl_mod = self.get_model(best)
return pspl_mod
def get_best_fit_modes_model(self, def_best='maxl'):
best_list = self.get_best_fit_modes(def_best=def_best)
pspl_mod_list = []
for best in best_list:
pspl_mod = self.get_model(best)
pspl_mod_list.append(pspl_mod)
return pspl_mod_list
def load_mnest_results(self, remake_fits=False):
"""Load up the MultiNest results into an astropy table.
"""
outroot = self.outputfiles_basename
if remake_fits or not os.path.exists(outroot + '.fits'):
# Load from text file (and make fits file)
tab = Table.read(outroot + '.txt', format='ascii')
# Convert to log(likelihood) since Multinest records -2*logLikelihood
tab['col2'] /= -2.0
# Rename the parameter columns. This is hard-coded to match the
# above run() function.
tab.rename_column('col1', 'weights')
tab.rename_column('col2', 'logLike')
for ff in range(len(self.all_param_names)):
cc = 3 + ff
tab.rename_column('col{0:d}'.format(cc), self.all_param_names[ff])
tab.write(outroot + '.fits', overwrite=True)
else:
# Load much faster from fits file.
tab = Table.read(outroot + '.fits')
return tab
def load_mnest_summary(self, remake_fits=False):
"""Load up the MultiNest results into an astropy table.
"""
sum_root = self.outputfiles_basename + 'summary'
if remake_fits or not os.path.exists(sum_root + '.fits'):
# Load from text file (and make fits file)
tab = Table.read(sum_root + '.txt', format='ascii')
tab.rename_column('col' + str(len(tab.colnames) - 1), 'logZ')
tab.rename_column('col' + str(len(tab.colnames)), 'maxlogL')
for ff in range(len(self.all_param_names)):
mean = 0 * len(self.all_param_names) + 1 + ff
stdev = 1 * len(self.all_param_names) + 1 + ff
maxlike = 2 * len(self.all_param_names) + 1 + ff
maxapost = 3 * len(self.all_param_names) + 1 + ff
tab.rename_column('col{0:d}'.format(mean),
'Mean_' + self.all_param_names[ff])
tab.rename_column('col{0:d}'.format(stdev),
'StDev_' + self.all_param_names[ff])
tab.rename_column('col{0:d}'.format(maxlike),
'MaxLike_' + self.all_param_names[ff])
tab.rename_column('col{0:d}'.format(maxapost),
'MAP_' + self.all_param_names[ff])
tab.write(sum_root + '.fits', overwrite=True)
else:
# Load from fits file, which is much faster.
tab = Table.read(sum_root + '.fits')
return tab
def load_mnest_modes(self, remake_fits=False):
"""Load up the separate modes results into an astropy table.
"""
# Get all the different mode files
tab_list = []
modes = glob.glob(self.outputfiles_basename + 'mode*.dat')
if len(modes) < 1:
# In rare cases, we don't have the .dat files (modified, re-split).
# Then check the *.fits files.
modes = glob.glob(self.outputfiles_basename + 'mode*.fits')
if len(modes) < 1:
print('No modes files! Did you run multinest_utils.separate_mode_files yet?')
else:
remake_fits = False
for num, mode in enumerate(modes, start=0):
mode_root = self.outputfiles_basename + 'mode' + str(num)
if remake_fits or not os.path.exists(mode_root + '.fits'):
# Load from text file (and make fits file)
tab = Table.read(mode_root + '.dat', format='ascii')
# Convert to log(likelihood) since Multinest records -2*logLikelihood
tab['col2'] /= -2.0
# Rename the parameter columns.
tab.rename_column('col1', 'weights')
tab.rename_column('col2', 'logLike')
for ff in range(len(self.all_param_names)):
cc = 3 + ff
tab.rename_column('col{0:d}'.format(cc), self.all_param_names[ff])
tab.write(mode_root + '.fits', overwrite=True)
else:
tab = Table.read(mode_root + '.fits')
tab_list.append(tab)
return tab_list
def load_mnest_results_for_dynesty(self, remake_fits=False):
"""Make a Dynesty-style results object that can
be used in the nicer plotting codes.
"""
# Fetch the summary stats for the global solution
stats = self.load_mnest_summary(remake_fits=remake_fits)
stats = stats[0]
# Load up all of the parameters.
data_tab = self.load_mnest_results(remake_fits=remake_fits)
# Sort the samples by increasing log-like.
sdx = data_tab['logLike'].argsort()
data_tab = data_tab[sdx]
weights = data_tab['weights']
loglike = data_tab['logLike']
samples = np.zeros((len(data_tab), len(self.all_param_names)), dtype=float)
for ff in range(len(self.all_param_names)):
samples[:, ff] = data_tab[self.all_param_names[ff]].astype(np.float64)
logZ = stats['logZ']
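# Nested-sampling bookkeeping: the posterior weight of sample i is
# w_i = L_i * dX_i / Z, so the (log) prior-volume element follows as
# log(dX_i) = log(w_i) - log(L_i) + log(Z).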
logvol = np.log(weights) - loglike + logZ
logvol = logvol - logvol.max()
results = dict(samples=samples, weights=weights, logvol=logvol, loglike=loglike)
return results
def load_mnest_modes_results_for_dynesty(self, remake_fits=False):
"""Make a Dynesty-style results object that can
be used in the nicer plotting codes.
"""
results_list = []
# Load up the summary results and trim out the global mode.
stats = self.load_mnest_summary(remake_fits=remake_fits)
stats = stats[1:]
# Load up all of the parameters.
modes_list = self.load_mnest_modes(remake_fits=remake_fits)
for num, data_tab in enumerate(modes_list, start=0):
# Sort the samples by increasing log-like.
sdx = data_tab['logLike'].argsort()
data_tab = data_tab[sdx]
weights = data_tab['weights']
loglike = data_tab['logLike']
samples = np.zeros((len(data_tab), len(self.all_param_names)), dtype=float)
for ff in range(len(self.all_param_names)):
samples[:, ff] = data_tab[self.all_param_names[ff]].astype(np.float64)
logZ = stats['logZ'][num] # are these in the same order?
logvol = np.log(weights) - loglike + logZ
logvol = logvol - logvol.max()
results = dict(samples=samples, weights=weights, logvol=logvol, loglike=loglike)
results_list.append(results)
return results_list
def plot_dynesty_style(self, sim_vals=None, fit_vals=None, remake_fits=False, dims=None,
traceplot=True, cornerplot=True, kde=True):
"""
Parameters
------------
sim_vals : dict
Dictionary of simulated input or comparison values to
overplot on posteriors.
fit_vals : str
Choices are 'map' (maximum a posteriori), 'mean', or
'maxl' (maximum likelihood)
"""
res = self.load_mnest_results_for_dynesty(remake_fits=remake_fits)
smy = self.load_mnest_summary(remake_fits=remake_fits)
truths = None
# Sort the parameters into the right order.
if sim_vals != None:
truths = []
for param in self.all_param_names:
if param in sim_vals:
truths.append(sim_vals[param])
else:
truths.append(None)
if fit_vals == 'map':
truths = []
for param in self.all_param_names:
truths.append(smy['MAP_' + param][0]) # global best fit.
if fit_vals == 'mean':
truths = []
for param in self.all_param_names:
truths.append(smy['Mean_' + param][0]) # global best fit.
if fit_vals == 'maxl':
truths = []
for param in self.all_param_names:
truths.append(smy['MaxLike_' + param][0]) # global best fit.
if dims is not None:
labels=[self.all_param_names[i] for i in dims]
truths=[truths[i] for i in dims]
else:
labels=self.all_param_names
if traceplot:
dyplot.traceplot(res, labels=labels, dims=dims,
show_titles=True, truths=truths, kde=kde)
plt.subplots_adjust(hspace=0.7)
plt.savefig(self.outputfiles_basename + 'dy_trace.png')
plt.close()
if cornerplot:
dyplot.cornerplot(res, labels=labels, dims=dims,
show_titles=True, truths=truths)
ax = plt.gca()
ax.tick_params(axis='both', which='major', labelsize=10)
plt.savefig(self.outputfiles_basename + 'dy_corner.png')
plt.close()
return
def plot_model_and_data(self, model,
input_model=None, mnest_results=None, suffix='',
zoomx=None, zoomy=None, zoomy_res=None, fitter=None,
N_traces=50):
"""
Make and save the model and data plots.
zoomx, zoomy, zoomy_res : list the same length as `self.n_phot_sets`
Each entry of the list is a list `[a, b]` corresponding to the plot limits
"""
# Plot out parameters (just record values)
fig = plot_params(model)
fig.savefig(self.outputfiles_basename + 'parameters.png')
plt.close()
# Plot photometry
if model.photometryFlag:
for i in range(self.n_phot_sets):
if hasattr(model, 'use_gp_phot'):
if model.use_gp_phot[i]:
gp = True
else:
gp = False
else:
gp = False
# if gp:
# pointwise_likelihood(self.data, model, filt_index=i)
# debug_gp_nan(self.data, model, filt_index=i)
fig = plot_photometry(self.data, model, input_model=input_model,
dense_time=True, residuals=True,
filt_index=i, mnest_results=mnest_results, gp=gp, fitter=fitter,
N_traces=N_traces)
fig.savefig(self.outputfiles_basename
+ 'phot_and_residuals_'
+ str(i + 1) + suffix + '.png')
plt.close()
if (zoomx is not None) or (zoomy is not None) or (zoomy_res is not None):
if zoomx is not None:
zoomxi=zoomx[i]
else:
zoomxi=None
if zoomy is not None:
zoomyi=zoomy[i]
else:
zoomyi=None
if zoomy_res is not None:
zoomy_resi=zoomy_res[i]
else:
zoomy_resi=None
fig = plot_photometry(self.data, model, input_model=input_model,
dense_time=True, residuals=True,
filt_index=i, mnest_results=mnest_results,
zoomx=zoomxi, zoomy=zoomyi, zoomy_res=zoomy_resi,
gp=gp, fitter=fitter, N_traces=N_traces)
fig.savefig(self.outputfiles_basename
+ 'phot_and_residuals_'
+ str(i + 1) + suffix + 'zoom.png')
plt.close()
if gp:
fig = plot_photometry_gp(self.data, model, input_model=input_model,
dense_time=True, residuals=True,
filt_index=i, mnest_results=mnest_results, gp=gp,
N_traces=N_traces)
if fig is not None:
fig.savefig(self.outputfiles_basename
+ 'phot_and_residuals_gp_'
+ str(i + 1) + suffix + '.png')
plt.close()
if (zoomx is not None) or (zoomy is not None) or (zoomy_res is not None):
if zoomx is not None:
zoomxi=zoomx[i]
else:
zoomxi=None
if zoomy is not None:
zoomyi=zoomy[i]
else:
zoomyi=None
if zoomy_res is not None:
zoomy_resi=zoomy_res[i]
else:
zoomy_resi=None
fig = plot_photometry_gp(self.data, model, input_model=input_model,
dense_time=True, residuals=True,
filt_index=i, mnest_results=mnest_results,
zoomx=zoomxi, zoomy=zoomyi, zoomy_res=zoomy_resi, gp=gp,
N_traces=N_traces)
if fig is not None:
fig.savefig(self.outputfiles_basename
+ 'phot_and_residuals_gp_'
+ str(i + 1) + suffix + 'zoom.png')
plt.close()
if model.astrometryFlag:
for i in range(self.n_ast_sets):
# If no photometry
if len(self.map_phot_idx_to_ast_idx) == 0:
fig_list = plot_astrometry(self.data, model,
input_model=input_model,
dense_time=True,
n_phot_sets=self.n_phot_sets,
filt_index=i,
ast_filt_index=i,
mnest_results=mnest_results, fitter=fitter,
N_traces=N_traces)
# If photometry
else:
fig_list = plot_astrometry(self.data, model,
input_model=input_model,
dense_time=True,
n_phot_sets=self.n_phot_sets,
filt_index=i,
ast_filt_index=self.map_phot_idx_to_ast_idx[i],
mnest_results=mnest_results, fitter=fitter,
N_traces=N_traces)
fig_list[0].savefig(
self.outputfiles_basename + 'astr_on_sky_' + str(i + 1) + suffix + '.png')
fig_list[1].savefig(
self.outputfiles_basename + 'astr_time_RA_' + str(i + 1) + suffix + '.png')
fig_list[2].savefig(
self.outputfiles_basename + 'astr_time_Dec_' + str(i + 1) + suffix + '.png')
fig_list[3].savefig(
self.outputfiles_basename + 'astr_time_RA_remove_pm_' + str(i + 1) + suffix + '.png')
fig_list[4].savefig(
self.outputfiles_basename + 'astr_time_Dec_remove_pm_' + str(i + 1) + suffix + '.png')
fig_list[5].savefig(
self.outputfiles_basename + 'astr_remove_pm_' + str(i + 1) + suffix + '.png')
fig_list[6].savefig(
self.outputfiles_basename + 'astr_on_sky_unlensed' + suffix + '.png')
fig_list[7].savefig(
self.outputfiles_basename + 'astr_longtime_RA_remove_pm' + suffix + '.png')
fig_list[8].savefig(
self.outputfiles_basename + 'astr_longtime_Dec_remove_pm' + suffix + '.png')
fig_list[9].savefig(
self.outputfiles_basename + 'astr_longtime_remove_pm' + suffix + '.png')
for fig in fig_list:
plt.close(fig)
return
def plot_model_and_data_modes(self, def_best='maxl'):
"""Plots photometry data, along with n random draws from the posterior.
"""
pspl_mod_list = self.get_best_fit_modes_model(def_best=def_best)
for num, pspl_mod in enumerate(pspl_mod_list, start=0):
model = pspl_mod
self.plot_model_and_data(model, suffix='_mode' + str(num))
return
def summarize_results(self, def_best='maxl', remake_fits=False):
tab = self.load_mnest_results(remake_fits=remake_fits)
smy = self.load_mnest_summary(remake_fits=remake_fits)
if len(tab) < 1:
print('Did you run multinest_utils.separate_mode_files yet?')
# Which params to include in table
parameters = tab.colnames
parameters.remove('weights')
parameters.remove('logLike')
print('####################')
print('Median Solution:')
print('####################')
fmt_med = ' {0:15s} {1:10.3f} + {2:10.3f} - {3:10.3f}'
fmt_other = ' {0:15s} {1:10.3f}'
best_arr = self.get_best_fit(def_best='median')
best = best_arr[0]
errs = best_arr[1]
for n in parameters:
print(fmt_med.format(n, best[n], errs[n][0], errs[n][1]))
self.print_likelihood(params=best)
print('')
print('####################')
print('Max-likelihood Solution:')
print('####################')
best = self.get_best_fit(def_best='maxl')
for n in parameters:
print(fmt_other.format(n, best[n]))
self.print_likelihood(params=best)
print('')
print('####################')
print('MAP Solution:')
print('####################')
best = self.get_best_fit(def_best='map')
for n in parameters:
print(fmt_other.format(n, best[n]))
self.print_likelihood(params=best)
print('')
return
def summarize_results_modes(self, remake_fits=False):
tab_list = self.load_mnest_modes(remake_fits=remake_fits)
smy = self.load_mnest_summary(remake_fits=remake_fits)
if len(tab_list) < 1:
print('Did you run multinest_utils.separate_mode_files yet?')
print('Number of modes : ' + str(len(tab_list)))
for ii, tab in enumerate(tab_list, 1):
# Which params to include in table
parameters = tab.colnames
parameters.remove('weights')
parameters.remove('logLike')
print('####################')
print('Median Solution:')
print('####################')
fmt_med = ' {0:15s} {1:10.3f} + {2:10.3f} - {3:10.3f}'
fmt_other = ' {0:15s} {1:10.3f}'
best_arr = self.calc_best_fit(tab=tab, smy=smy, s_idx=ii,
def_best='median')
best = best_arr[0]
errs = best_arr[1]
for n in parameters:
print(fmt_med.format(n, best[n], errs[n][0], errs[n][1]))
self.print_likelihood(params=best)
print('')
print('####################')
print('Max-likelihood Solution:')
print('####################')
best = self.calc_best_fit(tab=tab, smy=smy, s_idx=ii,
def_best='maxl')
for n in parameters:
print(fmt_other.format(n, best[n]))
self.print_likelihood(params=best)
print('')
print('####################')
print('MAP Solution:')
print('####################')
best = self.calc_best_fit(tab=tab, smy=smy, s_idx=ii,
def_best='map')
for n in parameters:
print(fmt_other.format(n, best[n]))
self.print_likelihood(params=best)
print('')
return
def print_likelihood(self, params='best', verbose=True):
"""
Parameters
-----------
params : str or dict, optional
params = 'best' will load up the best solution and calculate
the log-likelihood and chi^2 based on those values. Alternatively,
pass in a dictionary with the model parameters to use.
"""
if params == 'best':
params = self.get_best_fit()
lnL = self.log_likely(params, verbose)
chi2 = self.calc_chi2(params, verbose)
print('logL : {0:.1f}'.format(lnL))
print('chi2 : {0:.1f}'.format(chi2))
return
def calc_chi2(self, params='best', verbose=False):
"""
Parameters
-----------
params : str or dict, optional
params = 'best' will load up the best solution and calculate
the chi^2 based on those values. Alternatively, pass in a dictionary
with the model parameters to use.
"""
if params == 'best':
params = self.get_best_fit()
# Get likelihoods.
pspl = self.get_model(params)
lnL_phot = self.log_likely_photometry(pspl, params)
lnL_ast = self.log_likely_astrometry(pspl)
# Calculate constants needed to subtract from lnL to calculate chi2.
if pspl.astrometryFlag:
# Lists to store lnL, chi2, and constants for each filter.
chi2_ast_filts = []
lnL_const_ast_filts = []
for nn in range(self.n_ast_sets):
t_ast = self.data['t_ast' + str(nn + 1)]
x = self.data['xpos' + str(nn + 1)]
y = self.data['ypos' + str(nn + 1)]
xerr = self.data['xpos_err' + str(nn + 1)]
yerr = self.data['ypos_err' + str(nn + 1)]
# Calculate the lnL for just a single filter.
# If no photometry
if len(self.map_phot_idx_to_ast_idx) == 0:
lnL_ast_nn = pspl.log_likely_astrometry(t_ast, x, y, xerr, yerr, ast_filt_idx=nn)
# If photometry
else:
lnL_ast_nn = pspl.log_likely_astrometry(t_ast, x, y, xerr, yerr, ast_filt_idx=self.map_phot_idx_to_ast_idx[nn])
lnL_ast_nn = lnL_ast_nn.sum()
# Calculate the chi2 and constants for just a single filter.
lnL_const_ast_nn = -0.5 * np.log(2.0 * math.pi * xerr ** 2)
lnL_const_ast_nn += -0.5 * np.log(2.0 * math.pi * yerr ** 2)
lnL_const_ast_nn = lnL_const_ast_nn.sum()
chi2_ast_nn = (lnL_ast_nn - lnL_const_ast_nn) / -0.5
# Save to our lists
chi2_ast_filts.append(chi2_ast_nn)
lnL_const_ast_filts.append(lnL_const_ast_nn)
lnL_const_ast = sum(lnL_const_ast_filts)
else:
lnL_const_ast = 0
if pspl.photometryFlag:
# Lists to store lnL, chi2, and constants for each filter.
chi2_phot_filts = []
lnL_const_phot_filts = []
for nn in range(self.n_phot_sets):
if hasattr(pspl, 'use_gp_phot'):
if pspl.use_gp_phot[nn]:
gp = True
else:
gp = False
else:
gp = False
t_phot = self.data['t_phot' + str(nn + 1)]
mag = self.data['mag' + str(nn + 1)]
mag_err = self.get_modified_mag_err(params, nn)
# Calculate the lnL for just a single filter.
lnL_phot_nn = pspl.log_likely_photometry(t_phot, mag, mag_err, nn)
# Calculate the chi2 and constants for just a single filter.
if gp:
log_det = pspl.get_log_det_covariance(t_phot, mag, mag_err, nn)
lnL_const_phot_nn = -0.5 * log_det - 0.5 * np.log(2 * np.pi) * len(mag)
else:
lnL_const_phot_nn = -0.5 * np.log(2.0 * math.pi * mag_err**2)
lnL_const_phot_nn = lnL_const_phot_nn.sum()
chi2_phot_nn = (lnL_phot_nn - lnL_const_phot_nn) / -0.5
# Save to our lists
chi2_phot_filts.append(chi2_phot_nn)
lnL_const_phot_filts.append(lnL_const_phot_nn)
lnL_const_phot = sum(lnL_const_phot_filts)
else:
lnL_const_phot = 0
# Calculate chi2.
chi2_ast = (lnL_ast - lnL_const_ast) / -0.5
chi2_phot = (lnL_phot - lnL_const_phot) / -0.5
chi2 = chi2_ast + chi2_phot
if verbose:
fmt = '{0:13s} = {1:f} '
if pspl.photometryFlag:
for ff in range(self.n_phot_sets):
print(fmt.format('chi2_phot' + str(ff + 1), chi2_phot_filts[ff]))
if pspl.astrometryFlag:
for ff in range(self.n_ast_sets):
print(fmt.format('chi2_ast' + str(ff + 1), chi2_ast_filts[ff]))
print(fmt.format('chi2_phot', chi2_phot))
print(fmt.format('chi2_ast', chi2_ast))
print(fmt.format('chi2', chi2))
return chi2
def calc_chi2_manual(self, params='best', verbose=False):
"""
Parameters
-----------
params : str or dict, optional
params = 'best' will load up the best solution and calculate
the chi^2 based on those values. Alternatively, pass in a dictionary
with the model parameters to use.
"""
if params == 'best':
params = self.get_best_fit()
pspl = self.get_model(params)
if pspl.astrometryFlag:
# Lists to store lnL, chi2, and constants for each filter.
chi2_ast_filts = []
pspl = self.get_model(params)
for nn in range(self.n_ast_sets):
t_ast = self.data['t_ast' + str(nn + 1)]
x = self.data['xpos' + str(nn + 1)]
y = self.data['ypos' + str(nn + 1)]
xerr = self.data['xpos_err' + str(nn + 1)]
yerr = self.data['ypos_err' + str(nn + 1)]
# NOTE: WILL BREAK FOR LUMINOUS LENS. BREAKS FOR ASTROM AND PHOTOM??? ADD map_phot_
pos_out = pspl.get_astrometry(t_ast, ast_filt_idx=nn)
chi2_ast_nn = (x - pos_out[:,0])**2/xerr**2
chi2_ast_nn += (y - pos_out[:,1])**2/yerr**2
chi2_ast_filts.append(np.nansum(chi2_ast_nn))
else:
chi2_ast_filts = [0]
if pspl.photometryFlag:
# Lists to store lnL, chi2, and constants for each filter.
chi2_phot_filts = []
for nn in range(self.n_phot_sets):
if hasattr(pspl, 'use_gp_phot'):
if pspl.use_gp_phot[nn]:
gp = True
else:
gp = False
else:
gp = False
t_phot = self.data['t_phot' + str(nn + 1)]
mag = self.data['mag' + str(nn + 1)]
mag_err = self.get_modified_mag_err(params, nn)
if gp:
print('GP')
mod_m_at_dat, mod_m_at_dat_std = pspl.get_photometry_with_gp(t_phot, mag, mag_err, nn)
print(pspl.get_log_det_covariance(t_phot, mag, mag_err, nn))
mag_out = mod_m_at_dat
mag_err_out = mod_m_at_dat_std
chi2_phot_nn = (mag - mag_out)**2/mag_err_out**2
else:
mag_out = pspl.get_photometry(t_phot, nn)
chi2_phot_nn = (mag - mag_out)**2/mag_err**2
# chi2_phot_nn = (mag - mag_out)**2/mag_err**2
chi2_phot_filts.append(np.nansum(chi2_phot_nn))
print('NANs : ' + str(np.sum(np.isnan(chi2_phot_nn))))
else:
chi2_phot_filts = [0]
if verbose:
fmt = '{0:13s} = {1:f} '
if pspl.photometryFlag:
for ff in range(self.n_phot_sets):
print(fmt.format('chi2_phot' + str(ff + 1), chi2_phot_filts[ff]))
if pspl.astrometryFlag:
for ff in range(self.n_ast_sets):
print(fmt.format('chi2_ast' + str(ff + 1), chi2_ast_filts[ff]))
chi2 = np.sum(chi2_ast_filts) + np.sum(chi2_phot_filts)
# print(fmt.format('chi2_phot', chi2_phot))
# print(fmt.format('chi2_ast', chi2_ast))
# print(fmt.format('chi2', chi2))
#
return chi2
def write_summary_maxL(self, return_mnest_results=False):
tab = self.load_mnest_results()
smy = self.load_mnest_summary()
parameters = tab.colnames
fmt = '{0:15s} {1:10.3f}'
fmt_i = '{0:15s} {1:10d}'
k = self.n_dims
n_phot = 0
n_ast = 0
for nn in range(self.n_phot_sets):
n_phot += len(self.data['t_phot' + str(nn + 1)])
if self.n_ast_sets > 0:
for nn in range(self.n_ast_sets):
n_ast += 2 * len(self.data['t_ast' + str(nn + 1)])
n_tot = n_phot + n_ast
maxlogL = smy['maxlogL'][0]
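# Information criteria from the maximum likelihood. calc_AIC / calc_BIC
# are expected to implement the standard definitions
# AIC = 2k - 2 ln(L_max) and BIC = k ln(n_tot) - 2 ln(L_max),
# with k fitted parameters and n_tot data points.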
aic = calc_AIC(k, maxlogL)
bic = calc_BIC(n_tot, k, maxlogL)
parameters.remove('weights')
parameters.remove('logLike')
best = self.get_best_fit(def_best='maxl')
chi2 = self.calc_chi2(params=best, verbose=True)
lnL = self.log_likely(cube=best, verbose=True)
# Fetch the root name of the file.
file_dir, name_str = os.path.split(self.outputfiles_basename)
with open(name_str + 'maxL_summary.txt', 'w+') as myfile:
myfile.write(file_dir + '\n')
myfile.write(name_str + '\n')
myfile.write(fmt.format('logL', maxlogL) + '\n')
myfile.write(fmt.format('AIC', aic) + '\n')
myfile.write(fmt.format('BIC', bic) + '\n')
myfile.write(fmt.format('logL', lnL) + '\n')
myfile.write(fmt.format('chi2', chi2) + '\n')
myfile.write(fmt_i.format('n_tot', n_tot) + '\n')
myfile.write('\n')
for nn in parameters:
myfile.write(fmt.format(nn, best[nn]) + '\n')
if return_mnest_results:
return tab
else:
return
class PSPL_Solver_weighted(PSPL_Solver):
"""
Solver where the likelihood function has each data
set weighted equally (i.e. not the natural weighting by
the number of points; rather each data set k contributes
with weight 1/n_k, where n_k is the number of points in data set k).
"""
def __init__(self, data, model_class,
custom_additional_param_names=None,
add_error_on_photometry=False,
multiply_error_on_photometry=False,
use_phot_optional_params=True,
use_ast_optional_params=True,
wrapped_params=None,
importance_nested_sampling=False,
multimodal=True, const_efficiency_mode=False,
n_live_points=300,
evidence_tolerance=0.5, sampling_efficiency=0.8,
n_iter_before_update=100, null_log_evidence=-1e90,
max_modes=100, mode_tolerance=-1e90,
outputfiles_basename="chains/1-", seed=-1, verbose=False,
resume=False, context=0, write_output=True, log_zero=-1e100,
max_iter=0, init_MPI=False, dump_callback=None,
weights='phot_ast_equal'):
"""
See documentation for PSPL_Solver. The only additional input parameter
is weights which can be
* 'phot_ast_equal'
* 'all_equal'
* list - one weight per photometric + astrometric data set
* array - one weight per photometric + astrometric data set
"""
super().__init__(data, model_class,
custom_additional_param_names=custom_additional_param_names,
add_error_on_photometry=add_error_on_photometry,
multiply_error_on_photometry=multiply_error_on_photometry,
use_phot_optional_params=use_phot_optional_params,
use_ast_optional_params=use_ast_optional_params,
wrapped_params=wrapped_params,
importance_nested_sampling=importance_nested_sampling,
multimodal=multimodal,
const_efficiency_mode=const_efficiency_mode,
n_live_points=n_live_points,
evidence_tolerance=evidence_tolerance,
sampling_efficiency=sampling_efficiency,
n_iter_before_update=n_iter_before_update,
null_log_evidence=null_log_evidence,
max_modes=max_modes,
mode_tolerance=mode_tolerance,
outputfiles_basename=outputfiles_basename,
seed=seed,
verbose=verbose,
resume=resume,
context=context,
write_output=write_output,
log_zero=log_zero,
max_iter=max_iter,
init_MPI=init_MPI,
dump_callback=dump_callback)
self.weights = self.calc_weights(weights)
print(self.weights)
return
def calc_weights(self, weights):
"""
order of weight_arr is
`[phot_1, phot_2, ... phot_n, ast_1, ast_2, ... ast_n]`
"""
weights_arr = np.ones(self.n_phot_sets + self.n_ast_sets)
#####
# No weights
#####
if weights is None:
return weights_arr
# Calculate the number of photometry and astrometry data points
n_ast_data = 0
for nn in range(self.n_ast_sets):
n_ast_data += 2 * len(self.data['t_ast' + str(nn + 1)])
n_phot_data = 0
for i in range(self.n_phot_sets):
n_phot_data += len(self.data['t_phot' + str(i + 1)])
n_data = n_ast_data + n_phot_data
n_sets = self.n_phot_sets + self.n_ast_sets
#####
# All the photometry is weighted equally to the astrometry.
# The relative weights between the photometric data sets don't change.
#####
if weights == 'phot_ast_equal':
denom = n_ast_data * self.n_phot_sets + n_phot_data * self.n_ast_sets
# Photometry weights
for i in range(self.n_phot_sets):
n_i = len(self.data['t_phot' + str(i + 1)])
weights_arr[i] = (n_data / n_sets) * n_ast_data / denom
# Astrometry weights
for i in range(self.n_ast_sets):
n_i = len(self.data['t_ast' + str(i + 1)])
weights_arr[self.n_phot_sets + i] = (n_data / n_sets) * n_phot_data / denom
return weights_arr
#####
# Each data set is given equal weights, regardless of photometry
# or astrometry.
#####
if weights == 'all_equal':
# Photometry weights
for i in range(self.n_phot_sets):
n_i = len(self.data['t_phot' + str(i + 1)])
weights_arr[i] = (1e-3 * n_data / n_sets) * (1.0 / n_i)
# Astrometry weight
for i in range(self.n_ast_sets):
n_i = len(self.data['t_ast' + str(i + 1)])
weights_arr[self.n_phot_sets + i] = (1e-3 * n_data / n_sets) * (1.0 / n_i)
return weights_arr
#####
# Custom weights.
#####
else:
# Check weight array is right length, all positive numbers.
if not isinstance(weights, np.ndarray):
raise Exception('weight needs to be a numpy array.')
if len(weights_arr) != len(weights):
raise Exception('weight array needs to be the same length as the number of data sets.')
if len(np.where(weights < 0)[0]) > 0:
raise Exception('weights must be positive.')
return weights
def log_likely_astrometry(self, model):
if model.astrometryFlag:
lnL_ast = 0.0
for i in range(self.n_ast_sets):
t_ast = self.data['t_ast' + str(i + 1)]
xpos = self.data['xpos' + str(i + 1)]
ypos = self.data['ypos' + str(i + 1)]
xpos_err = self.data['xpos_err' + str(i + 1)]
ypos_err = self.data['ypos_err' + str(i + 1)]
weight = self.weights[self.n_phot_sets + i]
lnL_ast_unwgt = model.log_likely_astrometry(t_ast, xpos, ypos, xpos_err, ypos_err)
lnL_ast_i = lnL_ast_unwgt * weight
lnL_ast += lnL_ast_i
if self.verbose:
print(f'lnL_ast: i = {i} L_unwgt = {lnL_ast_unwgt:15.1f}, L_wgt = {lnL_ast_i:15.1f}, weights = {weight:.1e}')
else:
lnL_ast = 0
return lnL_ast
def log_likely_photometry(self, model, cube):
if model.photometryFlag:
lnL_phot = 0.0
for i in range(self.n_phot_sets):
t_phot = self.data['t_phot' + str(i + 1)]
mag = self.data['mag' + str(i + 1)]
# additive or multiplicative error
mag_err = self.get_modified_mag_err(cube, i)
weight = self.weights[i]
lnL_phot_unwgt = model.log_likely_photometry(t_phot, mag, mag_err, i)
lnL_phot_i = lnL_phot_unwgt * weight
lnL_phot += lnL_phot_i
if self.verbose:
print(f'lnL_phot: i = {i} L_unwgt = {lnL_phot_unwgt:15.1f}, L_wgt = {lnL_phot_i:15.1f}, weight = {weight:.1e}')
else:
lnL_phot = 0
return lnL_phot
class PSPL_Solver_Hobson_Weighted(PSPL_Solver):
def log_likely(self, cube, verbose=False):
"""
Compute a log-likelihood where there is a hyperparameter,
alpha_k, that controls the weighting between each data k set.
This algorithm is described in Hobson et al. 2002.
Specifically, we are implementing Eq. 35.
Parameters
-----------
cube : list or dict
The dictionary or cube of the model parameters.
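Notes
-----
As implemented below, each data set k with n_k measurements and
chi-squared chi2_k contributes

    lnL_k = lnL_const_standard
            - (n_k/2 + 1) * ln(chi2_k + 2)
            + ln Gamma(n_k/2 + 1) + (n_k/2 + 1) * ln(2)

where lnL_const_standard is the usual Gaussian normalization term.
This is the weight-marginalized likelihood of Hobson et al. 2002
(their Eq. 35), obtained with an exponential prior on alpha_k.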
"""
# Fetch the model for these parameters.
model = self.get_model(cube)
# We are implementing the Hobson weighting scheme such that we
# explore and then marginalize over the hyperparameter, alpha_k (ak),
# where we have the kth data set, Dk.
# see Hobson et al. 2002 for details.
lnL = 0.0
##########
# Photometry
##########
if model.photometryFlag:
for i in range(self.n_phot_sets):
t_phot = self.data['t_phot' + str(i + 1)]
mag = self.data['mag' + str(i + 1)]
# additive or multiplicative error
mag_err = self.get_modified_mag_err(cube, i)
nk = len(mag)
nk21 = (nk / 2.0) + 1.0
chi2_m = model.get_chi2_photometry(t_phot, mag, mag_err, filt_index=i)
lnL_const_standard = model.get_lnL_constant(mag_err)
lnL_const_hobson = scipy.special.gammaln( nk21 ) + (nk21 * np.log(2))
# Equation 35 from Hobson
lnL_phot = lnL_const_standard.sum()
lnL_phot += -1.0 * nk21 * np.log(chi2_m.sum() + 2)
lnL_phot += lnL_const_hobson
lnL += lnL_phot
##########
# Astrometry
##########
if model.astrometryFlag:
for i in range(self.n_ast_sets):
# If no photometry
if len(self.map_phot_idx_to_ast_idx) == 0:
ast_filt_idx = i
else:
ast_filt_idx = self.map_phot_idx_to_ast_idx[i]
t_ast = self.data['t_ast' + str(i+1)]
x_obs = self.data['xpos' + str(i+1)]
y_obs = self.data['ypos' + str(i+1)]
x_err_obs = self.data['xpos_err' + str(i+1)]
y_err_obs = self.data['ypos_err' + str(i+1)]
nk = len(x_obs) + len(y_obs)
nk21 = (nk / 2.0) + 1.0
chi2_xy = model.get_chi2_astrometry(t_ast, x_obs, y_obs, x_err_obs, y_err_obs, ast_filt_idx=ast_filt_idx)
lnL_const_standard = model.get_lnL_constant(x_err_obs) + model.get_lnL_constant(y_err_obs)
lnL_const_hobson = scipy.special.gammaln( nk21 ) + (nk21 * np.log(2))
# Equation 35 from Hobson
lnL_ast = lnL_const_standard.sum()
lnL_ast += -1.0 * nk21 * np.log(chi2_xy.sum() + 2)
lnL_ast += lnL_const_hobson
lnL += lnL_ast
# Reporting
if verbose:
# self.plot_model_and_data(model)
# pdb.set_trace()
fmt = '{0:13s} = {1:f} '
for ff in range(self.n_params):
if isinstance(cube, dict) or isinstance(cube, Row):
pname = self.all_param_names[ff]
if ((isinstance(cube, dict) and pname in cube) or
(isinstance(cube, Row) and pname in cube.colnames)):
print(fmt.format(pname, cube[pname])),
else:
print(fmt.format(pname, -999.0)),
else:
print(fmt.format(self.all_param_names[ff], cube[ff])),
print(fmt.format('lnL_phot', lnL_phot)),
print(fmt.format('lnL_ast', lnL_ast)),
print(fmt.format('lnL', lnL))
return lnL
def hobson_weight_log_likely(self, ln_prob_dk_giv_ak_1):
"""
Implement a data-set-specific weighting scheme by using
a hyperparameter, alpha_k, for the kth data set as
described in Hobson et al. 2002.
Specifically, we are implementing Eq. 16 and 23-27, with the
prior described in Eq. 21.
We are not using the simplifications in Section 5 for now.
"""
# Get back to prob not ln(prob):
prob_dk_giv_ak_1 = np.exp(ln_prob_dk_giv_ak_1)
# Sample alpha_k hyperparameter
alpha_k_prior = scipy.stats.expon()
# print('Hobson: ', ln_prob_dk_giv_ak_1)
def integrand(ak, prob_dk_giv_ak_1, ln_prob_dk_giv_ak_1, ii):
# Prior probability for this ak
prob_ak = alpha_k_prior.pdf(ak)
ln_prob_ak = np.log(prob_ak)
# Normalization (over all data) for this ak
z_k_ak = np.sum(np.exp(ak * ln_prob_dk_giv_ak_1))
ln_z_k_ak = np.log(z_k_ak)
# Pull out just this single data point.
ln_prob_di_ak_1 = ln_prob_dk_giv_ak_1[ii]
ln_prob_d_ak = (ak * ln_prob_di_ak_1) + ln_prob_ak - ln_z_k_ak
# print(f'ak = {ak:.4f} ln_z_k_ak = {ln_z_k_ak} z_k_ak = {z_k_ak} ln_prob_d_ak={ln_prob_d_ak}')
prob_d_ak = np.exp(ln_prob_d_ak)
return prob_d_ak
prob_dk = np.zeros(len(prob_dk_giv_ak_1), dtype=float)
# for ii in range(len(prob_d_each)):
for ii in range(2):
# pdb.set_trace()
prob_dk[ii] = scipy.integrate.quad(integrand, 0, np.inf,
args=(prob_dk_giv_ak_1, ln_prob_dk_giv_ak_1, ii))[0]
# print(f' prob_dk = {prob_dk}')
lnL_dk = np.log(prob_dk)
return lnL_dk
def get_hobson_effective_weights(self, cube):
"""
Return the effective weights, alpha_k, for each
data set. Photometry first, then astrometry.
"""
eff_weights = np.empty(0, dtype=float)
# Fetch the model for these parameters.
model = self.get_model(cube)
# We are implementing the Hobson weighting scheme such that we
# explore and then marginalize over the hyperparameter, alpha_k (ak),
# where we have the kth data set, Dk.
# see Hobson et al. 2002 for details.
##########
# Photometry
##########
if model.photometryFlag:
for i in range(self.n_phot_sets):
t_phot = self.data['t_phot' + str(i + 1)]
mag = self.data['mag' + str(i + 1)]
# additive or multiplicative error
mag_err = self.get_modified_mag_err(cube, i)
nk = len(mag)
chi2_m = model.get_chi2_photometry(t_phot, mag, mag_err, filt_index=i)
ak_eff = nk / chi2_m.sum()
eff_weights = np.append(eff_weights, ak_eff)
##########
# Astrometry
##########
if model.astrometryFlag:
for i in range(self.n_ast_sets):
# If no photometry
if len(self.map_phot_idx_to_ast_idx) == 0:
ast_filt_idx = i
else:
ast_filt_idx = self.map_phot_idx_to_ast_idx[i]
t_ast = self.data['t_ast' + str(i+1)]
x_obs = self.data['xpos' + str(i+1)]
y_obs = self.data['ypos' + str(i+1)]
x_err_obs = self.data['xpos_err' + str(i+1)]
y_err_obs = self.data['ypos_err' + str(i+1)]
nk = len(x_obs) + len(y_obs)
chi2_xy = model.get_chi2_astrometry(t_ast, x_obs, y_obs, x_err_obs, y_err_obs, ast_filt_idx=ast_filt_idx)
ak_eff = nk / chi2_xy.sum()
eff_weights = np.append(eff_weights, ak_eff)
return eff_weights
#########################
### PRIOR GENERATORS ###
#########################
def make_gen(min, max):
return scipy.stats.uniform(loc=min, scale=max - min)
def make_norm_gen(mean, std):
return scipy.stats.norm(loc=mean, scale=std)
def make_lognorm_gen(mean, std):
""" Make a natural-log normal distribution for a variable.
The specified mean and std should be in the ln() space.
"""
return scipy.stats.lognorm(s=std, scale=np.exp(mean))
def make_log10norm_gen(mean_in_log10, std_in_log10):
"""Scale scipy lognorm from natural log to base 10.
Note the mean and std should be in the log10() space already.
Parameters
-------------
mean_in_log10:
mean of the underlying log10 gaussian (i.e. a log10 quantity)
std_in_log10:
standard deviation of the underlying log10 gaussian (also in log10)
"""
# Convert mean and std from log10 to ln.
return scipy.stats.lognorm(s=std_in_log10 * np.log(10), scale=np.exp(mean_in_log10 * np.log(10)))
def make_truncnorm_gen(mean, std, lo_cut, hi_cut):
"""lo_cut and hi_cut are in the units of sigma
"""
return scipy.stats.truncnorm(lo_cut, hi_cut,
loc=mean, scale=std)
def make_truncnorm_gen_with_bounds(mean, std, low_bound, hi_bound):
"""
low_bound and hi_bound are in the same units as mean and std
"""
assert hi_bound > low_bound
clipped_mean = min(max(mean, low_bound), hi_bound)
if clipped_mean == low_bound:
low_sigma = -0.01 * std
hi_sigma = (hi_bound - clipped_mean) / std
elif clipped_mean == hi_bound:
low_sigma = (low_bound - clipped_mean) / std
hi_sigma = 0.01 * std
else:
low_sigma = (low_bound - clipped_mean) / std
hi_sigma = (hi_bound - clipped_mean) / std
return scipy.stats.truncnorm(low_sigma, hi_sigma,
loc=clipped_mean, scale=std)
def make_t0_gen(t, mag):
"""Get an approximate t0 search range by finding the brightest point
and then searching days where flux is higher than 80% of this peak.
"""
mag_min = np.min(mag) # min mag = brightest
delta_mag = np.max(mag) - mag_min
idx = np.where(mag < (mag_min + (0.2 * delta_mag)))[0]
t0_min = t[idx].min()
t0_max = t[idx].max()
# Pad by an extra 40% in case of gaps.
t0_range = t0_max - t0_min
t0_min -= 0.4 * t0_range
t0_max += 0.4 * t0_range
return make_gen(t0_min, t0_max)
def make_mag_base_gen(mag):
"""
Make a prior for baseline magnitude using the data.
"""
mean, med, std = sigma_clipped_stats(mag, sigma_lower=2, sigma_upper=4)
gen = make_truncnorm_gen(mean, 3 * std, -5, 5)
return gen
def make_mag_src_gen(mag):
"""
Make a prior for source magnitude using the data.
Allow negative blending.
"""
mean, med, std = sigma_clipped_stats(mag, sigma_lower=2, sigma_upper=4)
gen = make_gen(mean - 1, mean + 5)
return gen
def make_xS0_gen(pos):
posmin = pos.min() - 5 * pos.std()
posmax = pos.max() + 5 * pos.std()
# print('make_xS0_gen')
# print('posmin : ', posmin)
# print('posmax : ', posmax)
# print(' ')
return make_gen(posmin, posmax)
def make_xS0_norm_gen(pos):
posmid = 0.5 * (pos.min() + pos.max())
poswidth = np.abs(pos.max() - pos.min())
# print('make_xS0_norm_gen')
# print('posmid : ', posmid)
# print('poswidth : ', poswidth)
# print(' ')
return make_norm_gen(posmid, poswidth)
def make_muS_EN_gen(t, pos, scale_factor=100.0):
"""Get an approximate muS search range by looking at the best fit
straight line to the astrometry. Then allows lots of free space.
Parameters
----------
t:
array of times in days
pos:
array of positions in arcsec
Returns
-------
gen:
uniform generator for velocity in mas/yr
"""
# Convert t to years temporarily.
t_yr = t / mmodel.days_per_year
# Reshaping stuff... convert (1,N) array into (N,) array
if (t_yr.ndim == 2 and t_yr.shape[0] == 1):
t_yr = t_yr.reshape(len(t_yr[0]))
pos = pos.reshape(len(pos[0]))
par, cov = np.polyfit(t_yr, pos, 1, cov=True)
vel = par[0] * 1e3 # mas/yr
vel_err = (cov[0][0] ** 0.5) * 1e3 # mas/yr
vel_lo = vel - scale_factor * vel_err
vel_hi = vel + scale_factor * vel_err
# print('make_muS_EN_gen')
# print('vel_lo : ', vel_lo)
# print('vel_hi : ', vel_hi)
# print(' ')
return make_gen(vel_lo, vel_hi)
def make_muS_EN_norm_gen(t, pos):
"""Get an approximate muS search range by looking at the best fit
straight line to the astrometry. Then allows lots of free space.
Parameters
------------
t:
array of times in days
pos:
array of positions in arcsec
Returns
--------
gen:
normal (Gaussian) generator for velocity in mas/yr
"""
# Convert t to years temporarily.
t_yr = t / mmodel.days_per_year
par, cov = np.polyfit(t_yr, pos, 1, cov=True)
vel = par[0] * 1e3 # mas/yr
vel_err = (cov[0][0] ** 0.5) * 1e3 # mas/yr
scale_factor = 10.0
# print('make_muS_EN_norm_gen')
# print('vel : ', vel)
# print('vel_1sigma : ', scale_factor * vel_err)
# print(' ')
return make_norm_gen(vel, scale_factor * vel_err)
def make_invgamma_gen(t_arr):
"""ADD DESCRIPTION
Parameters
------------
t_arr:
time array
"""
a,b = compute_invgamma_params(t_arr)
# print('inv gamma')
# print('a : ', a)
# print('b : ', b)
return scipy.stats.invgamma(a, scale=b)
def compute_invgamma_params(t_arr):
"""
| Based on function of same name from
<NAME>'s ``caustic`` package:
https://github.com/fbartolic/caustic
| Returns parameters of an inverse gamma distribution s.t.
* 1% of total prob. mass is assigned to values of :math:`t < t_{min}` and
* 1% of total prob. mass to values of :math:`t > t_{max}`.
`t_{min}` is defined to be the median spacing between consecutive
data points in the time series and t_{max} is the total duration
of the time series.
Parameters
----------
t_arr : array
Array of times
Returns
-------
invgamma_a, invgamma_b : float
The parameters a, b of the inverse gamma distribution.
"""
def solve_for_params(params, x_min, x_max):
lower_mass = 0.01
upper_mass = 0.99
# Trial parameters
alpha, beta = params
# Equation for the roots defining params which satisfy the constraint
cdf_l = scipy.stats.invgamma.cdf(x_min, alpha, scale=beta) - lower_mass,
cdf_u = scipy.stats.invgamma.cdf(x_max, alpha, scale=beta) - upper_mass,
return np.array([cdf_l, cdf_u]).reshape((2,))
# Compute parameters for the prior on GP hyperparameters
med_sep = np.median(np.diff(t_arr))
tot_dur = t_arr[-1] - t_arr[0]
invgamma_a, invgamma_b = scipy.optimize.fsolve(solve_for_params,
(0.001, 0.001),
(med_sep, tot_dur))
return invgamma_a, invgamma_b
def make_piS():
# piS prior comes from PopSyCLE:
# We will assume a truncated normal distribution with only a small-side truncation at ~20 kpc.
piS_mean = 0.1126 # mas
piS_std = 0.0213 # mas
piS_lo_cut = (0.05 - piS_mean) / piS_std # sigma
piS_hi_cut = 90. # sigma
return scipy.stats.truncnorm(piS_lo_cut, piS_hi_cut,
loc=piS_mean, scale=piS_std)
def make_fdfdt():
return scipy.stats.norm(loc=0, scale=1 / 365.25)
def random_prob(generator, x):
value = generator.ppf(x)
ln_prob = generator.logpdf(value)
return value, ln_prob
def weighted_quantile(values, quantiles, sample_weight=None,
values_sorted=False, old_style=False):
""" Very close to numplt.percentile, but supports weights.
Parameters
_____________
values:
numplt.array with data
quantiles:
array-like with many quantiles needed
sample_weight:
array-like of the same length as `array`
values_sorted: bool,
if True, then will avoid sorting of initial array
old_style:
if True, will correct output to be consistent with numplt.percentile.
Returns
--------
arr:
numplt.array with computed quantiles.
Notes
-------
.. note:: quantiles should be in [0, 1]!
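Examples
--------
Unweighted and weighted medians (arrays are illustrative):

>>> weighted_quantile([1., 2., 3., 4.], [0.5])
array([2.5])
>>> weighted_quantile([1., 2., 3., 4.], [0.5], sample_weight=[1, 1, 1, 5])
array([3.5])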
"""
values = np.array(values)
quantiles = np.array(quantiles)
if sample_weight is None:
sample_weight = np.ones(len(values))
sample_weight = np.array(sample_weight)
assert np.all(quantiles >= 0) and np.all(
quantiles <= 1), 'quantiles should be in [0, 1]'
if not values_sorted:
sorter = np.argsort(values)
values = values[sorter]
sample_weight = sample_weight[sorter]
weighted_quantiles = np.cumsum(sample_weight) - 0.5 * sample_weight
if old_style:
# To be consistent with np.percentile
weighted_quantiles -= weighted_quantiles[0]
weighted_quantiles /= weighted_quantiles[-1]
else:
weighted_quantiles /= np.sum(sample_weight)
return np.interp(quantiles, weighted_quantiles, values)
def split_param_filter_index1(s):
"""
Split a parameter name into the <string><number> components
where <string> is the parameter name and <number> is the filter
index (1-based). If there is no number at the end for a filter
index, then return None for the second argument.
Returns
----------
param_name : str
The name of the parameter.
filt_index : int (or None)
The 1-based filter index.
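Examples
--------
Illustrative parameter names:

>>> split_param_filter_index1('mag_src2')
('mag_src', 2)
>>> split_param_filter_index1('tE')
('tE', None)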
"""
param_name = s.rstrip('123456789')
if len(param_name) == len(s):
filt_index = None
else:
filt_index = int(s[len(param_name):])
return param_name, filt_index
def generate_params_dict(params, fitter_param_names):
"""
Take a list, dictionary, or astropy Row of fit parameters
and extra parameters and convert it into a well-formed dictionary
that can be fed straight into a model object.
The output object will only contain parameters specified
by name in fitter_param_names. Multi-filter photometry
parameters are treated specially and grouped together into an
array such as ['mag_src'] = [mag_src1, mag_src2].
Parameters
----------
params : list, dict, Row
Contains values of parameters. Note that if the
params are in a list, they need to be in the same
order as fitter_param_names. If the params are in
a dict or Row, then order is irrelevant.
fitter_param_names : list
The names of the parameters that will be
delivered, in order, in the output.
Returns
----------
params_dict : dict
Dictionary of the parameter names and values.
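Examples
--------
Illustrative parameter names and values:

>>> generate_params_dict({'tE': 200.0, 'piE_E': 0.1, 'mag_src1': 18.0, 'mag_src2': 17.5},
...                      ['tE', 'piE_E', 'mag_src1', 'mag_src2'])
{'tE': 200.0, 'piE_E': 0.1, 'mag_src': [18.0, 17.5]}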
"""
skip_list = ['weights', 'logLike', 'add_err', 'mult_err']
multi_list = ['mag_src', 'mag_base', 'b_sff', 'mag_src_pri', 'mag_src_sec', 'fratio_bin']
multi_dict = ['gp_log_rho', 'gp_log_S0', 'gp_log_sigma', 'gp_rho', 'gp_log_omega04_S0', 'gp_log_omega0']
params_dict = {}
for i, param_name in enumerate(fitter_param_names):
# Skip some parameters.
if any([x in param_name for x in skip_list]):
continue
if isinstance(params, (dict, Row)):
key = param_name
else:
key = i
# Check to see if this is a multi-filter parameter. None if not.
filt_param, filt_idx = split_param_filter_index1(param_name)
# Handle global parameters (not filter dependent)
if filt_idx == None:
params_dict[param_name] = params[key]
else:
# Handle filter dependent parameters... 2 cases (list=required vs. dict=optional)
if filt_param in multi_list:
# Handle the filter-dependent fit parameters (required params).
# They need to be grouped as a list for input into a model.
if filt_param not in params_dict:
params_dict[filt_param] = []
# Add this filter to our list.
params_dict[filt_param].append(params[key])
if filt_param in multi_dict:
# Handle the optional filter-dependent fit parameters.
# They need to be grouped as a dictionary for input into a model.
if filt_param not in params_dict:
params_dict[filt_param] = {}
# Add this filter to our dict. Note the switch to 0-based here.
params_dict[filt_param][filt_idx-1] = params[key]
return params_dict
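# Sketch of the regrouping behaviour (parameter names are hypothetical): the two
# per-filter source magnitudes are collected into a single list under 'mag_src',
# while the global parameter 't0' passes through unchanged.
def _example_generate_params_dict():
    params = {'t0': 57000.0, 'mag_src1': 19.2, 'mag_src2': 18.7}
    names = ['t0', 'mag_src1', 'mag_src2']
    out = generate_params_dict(params, names)
    # out == {'t0': 57000.0, 'mag_src': [19.2, 18.7]}
    return out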
########################################
### GENERAL USE AND SHARED FUNCTIONS ###
########################################
def pointwise_likelihood(data, model, filt_index=0):
"""Makes some plots to diagnose weirdness in GP fits.
"""
# Get the data out.
dat_t = data['t_phot' + str(filt_index + 1)]
dat_m = data['mag' + str(filt_index + 1)]
dat_me = data['mag_err' + str(filt_index + 1)]
# Evaluate the model log-likelihood at each measurement epoch individually.
pw_logL = np.zeros(len(dat_t))
for tt, time in enumerate(dat_t):
pw_logL[tt] = model.log_likely_photometry([dat_t[tt]], [dat_m[tt]], [dat_me[tt]], filt_index)
return pw_logL
def debug_gp_nan(data, model, filt_index=0):
"""Makes some plots to diagnose weirdness in GP fits.
"""
# Get the data out.
dat_t = data['t_phot' + str(filt_index + 1)]
dat_m = data['mag' + str(filt_index + 1)]
dat_me = data['mag_err' + str(filt_index + 1)]
# Make models.
# Decide if we sample the models at a denser time, or just the
# same times as the measurements.
mod_m_out, mod_m_out_std = model.get_photometry_with_gp(dat_t, dat_m, dat_me, filt_index, dat_t)
if mod_m_out is None:
print('GP not working at prediction times!')
mod_m_out = model.get_photometry(dat_t, filt_index)
mod_m_at_dat, mod_m_at_dat_std = model.get_photometry_with_gp(dat_t, dat_m, dat_me, filt_index)
bad_idx = np.nonzero(np.isnan(mod_m_at_dat))[0]
print('Number of nan: ', str(len(bad_idx)))
plt.figure(100, figsize=(10,10))
plt.clf()
plt.errorbar(dat_t, dat_m, yerr=dat_me, fmt='k.', alpha=0.2)
plt.errorbar(dat_t[bad_idx], dat_m[bad_idx], yerr=dat_me[bad_idx], fmt='ro', alpha=1)
plt.gca().invert_yaxis()
plt.xlabel('Time')
plt.ylabel('Mag')
plt.savefig('nans.png')
# Magnitude errors
plt.figure(101, figsize=(6,6))
plt.clf()
plt.hist(dat_me, label='All', bins=np.linspace(0, np.max(dat_me), 50), alpha=0.5)
plt.hist(dat_me[bad_idx], label='Bad', bins=np.linspace(0, np.max(dat_me), 50), alpha=0.5)
plt.yscale('log')
plt.xlabel('mag err')
plt.legend()
plt.savefig('nans_me_hist.png')
# Difference between time of point N and point N-1.
plt.figure(102, figsize=(6,6))
plt.clf()
plt.hist(dat_t[bad_idx] - dat_t[bad_idx-1], bins=np.logspace(-2, 2, 50), label='Bad', alpha=0.5)
plt.hist(dat_t[1:] - dat_t[:-1], bins=np.logspace(-2, 2, 50), label='All', alpha=0.5)
plt.xscale('log')
plt.yscale('log')
plt.xlabel('delta t (days)')
plt.legend()
plt.savefig('nans_deltat_hist.png')
def plot_params(model):
"""Print parameters
"""
x0 = 0.05
y0 = 0.95
dy = 0.03
fig = plt.figure(1, figsize=(10, 10))
plt.subplots_adjust(left=0.1, top=0.95, bottom=0.05, right=0.95)
ax_lab = fig.add_subplot(111)
ax_lab.xaxis.set_visible(False)
ax_lab.yaxis.set_visible(False)
ax_lab.set_axis_off()
ax_lab.text(x0, y0 - 0 * dy, 'Model Parameters:', fontsize=10)
def get_param_value(pname):
if pname.endswith('_E') or pname.endswith('_N'):
pname_act = pname[:-2]
elif pname == 'log10_thetaE':
pname_act = 'thetaE_amp'
else:
pname_act = pname
pvalue = getattr(model, pname_act)
if pname.endswith('_E'):
pvalue = pvalue[0]
if pname.endswith('_N'):
pvalue = pvalue[1]
if pname == 'log10_thetaE':
pvalue = np.log10(pvalue)
return pvalue
for ff in range(len(model.fitter_param_names)):
pname = model.fitter_param_names[ff]
pvalu = get_param_value(pname)
fmt_str = '{0:s} = {1:.2f}'
if pname.startswith('x'):
fmt_str = '{0:s} = {1:.4f}'
ax_lab.text(x0, y0 - (ff + 1) * dy,
fmt_str.format(pname, pvalu),
fontsize=10)
nrow = len(model.fitter_param_names)
for ff in range(len(model.phot_param_names)):
pname = model.phot_param_names[ff]
pvalu = get_param_value(pname)
fmt_str = '{0:s} = {1:.2f}'
for rr in range(len(pvalu)):
ax_lab.text(x0, y0 - (nrow + 1) * dy,
fmt_str.format(pname + str(rr + 1), pvalu[rr]),
fontsize=10)
nrow += 1
nrow = 0
for ff in range(len(model.additional_param_names)):
pname = model.additional_param_names[ff]
pvalu = get_param_value(pname)
fmt_str = '{0:s} = {1:.2f}'
if pname in multi_filt_params:
for rr in range(len(pvalu)):
ax_lab.text(x0, y0 - (nrow + 1) * dy,
fmt_str.format(pname + str(rr + 1), pvalu[rr]),
fontsize=10)
nrow += 1
else:
ax_lab.text(x0 + 0.5, y0 - (ff + 1) * dy,
fmt_str.format(pname, pvalu),
fontsize=10)
nrow += 1
return fig
def plot_photometry(data, model, input_model=None, dense_time=True, residuals=True,
filt_index=0, zoomx=None, zoomy=None, zoomy_res=None, mnest_results=None,
N_traces=50, gp=False, fitter=None):
"""Get the data out.
"""
dat_t = data['t_phot' + str(filt_index + 1)]
dat_m = data['mag' + str(filt_index + 1)]
dat_me = data['mag_err' + str(filt_index + 1)]
# Make models.
# Decide if we sample the models at a denser time, or just the
# same times as the measurements.
if dense_time:
# 0.1 day sampling over whole range
mod_t = np.arange(dat_t.min(), dat_t.max(), 0.1)
else:
mod_t = dat_t
if gp:
mod_m_out, mod_m_out_std = model.get_photometry_with_gp(dat_t, dat_m, dat_me, filt_index, mod_t)
if mod_m_out is None:
print('GP not working at prediction times!')
mod_m_out = model.get_photometry(mod_t, filt_index)
mod_m_at_dat, mod_m_at_dat_std = model.get_photometry_with_gp(dat_t, dat_m, dat_me, filt_index)
if mod_m_at_dat is None:
print('GP not working at data times!')
mod_m_at_dat = model.get_photometry(dat_t, filt_index)
else:
mod_m_out = model.get_photometry(mod_t, filt_index)
mod_m_at_dat = model.get_photometry(dat_t, filt_index)
# Input Model
if input_model != None:
mod_m_in = input_model.get_photometry(mod_t, filt_index)
# fig = plt.figure(1, figsize=(15,15))
fig = plt.figure(1, figsize=(10,10))
plt.clf()
# plt.subplots_adjust(bottom=0.2, left=0.2)
plt.subplots_adjust(bottom=0.2, left=0.3)
# Decide if we are plotting residuals
if residuals:
# f1 = plt.gcf().add_axes([0.1, 0.3, 0.8, 0.6])
# f1 = plt.gcf().add_axes([0.1, 0.35, 0.8, 0.55])
# f2 = plt.gcf().add_axes([0.1, 0.1, 0.8, 0.2])
f1 = plt.gcf().add_axes([0.2, 0.45, 0.7, 0.45])
f2 = plt.gcf().add_axes([0.2, 0.15, 0.7, 0.25])
else:
plt.gca()
#####
# Data
#####
f1.errorbar(dat_t, dat_m, yerr=dat_me, fmt='k.', alpha=0.2, label='Data')
if input_model != None:
f1.plot(mod_t, mod_m_in, 'g-', label='Input')
f1.plot(mod_t, mod_m_out, 'r-', label='Model')
if gp and mod_m_out_std is not None:
f1.fill_between(mod_t, mod_m_out+mod_m_out_std, mod_m_out-mod_m_out_std,
color='r', alpha=0.3, edgecolor="none")
f1.set_ylabel('I (mag)')
f1.invert_yaxis()
f1.set_title('Input Data and Output Model')
f1.get_xaxis().set_visible(False)
f1.set_xlabel('t - t0 (days)')
f1.legend()
if zoomx is not None:
f1.set_xlim(zoomx[0], zoomx[1])
if zoomy is not None:
f1.set_ylim(zoomy[0], zoomy[1])
#####
# Traces
#####
if mnest_results is not None:
idx_arr = np.random.choice(np.arange(len(mnest_results['weights'])),
p=mnest_results['weights'],
size=N_traces)
trace_times = []
trace_magnitudes = []
for idx in idx_arr:
# # FIXME: This doesn't work if there are additional_param_names in the model
# # You will have extra arguments when passing in **params_dict into the model class.
# # FIXME 2: there needs to be a way to deal with multiples in additional_param_names
# params_dict = generate_params_dict(mnest_results[idx],
# mnest_results.colnames)
#
# trace_mod = model.__class__(**params_dict,
# raL=model.raL,
# decL=model.decL)
trace_mod = fitter.get_model(mnest_results[idx])
if gp:
trace_mag, trace_mag_std = trace_mod.get_photometry_with_gp(dat_t, dat_m, dat_me, filt_index, mod_t)
if trace_mag_std is None:
print('GP is not working at model times!')
continue
else:
trace_mag = trace_mod.get_photometry(mod_t, filt_index)
trace_times.append(mod_t)
trace_magnitudes.append(trace_mag)
f1.plot(mod_t, trace_mag,
color='c',
alpha=0.5,
linewidth=1,
zorder=-1)
#####
# Residuals
#####
if residuals:
f1.get_shared_x_axes().join(f1, f2)
f2.errorbar(dat_t, dat_m - mod_m_at_dat,
yerr=dat_me, fmt='k.', alpha=0.2)
f2.axhline(0, linestyle='--', color='r')
f2.set_xlabel('Time (HJD)')
f2.set_ylabel('Obs - Mod')
if zoomx is not None:
f2.set_xlim(zoomx[0], zoomx[1])
if zoomy is not None:
f2.set_ylim(zoomy[0], zoomy[1])
if zoomy_res is not None:
f2.set_ylim(zoomy_res[0], zoomy_res[1])
return fig
def plot_photometry_gp(data, model, input_model=None, dense_time=True, residuals=True,
filt_index=0, zoomx=None, zoomy=None, zoomy_res=None, mnest_results=None,
N_traces=50, gp=False):
gs_kw = dict(height_ratios=[1,2,1])
fig, (ax1, ax2, ax3) = plt.subplots(nrows=3, ncols=1, sharex=True,
figsize=(15,15),
gridspec_kw=gs_kw)
# plt.clf()
plt.subplots_adjust(bottom=0.1, left=0.1)
# Get the data out.
dat_t = data['t_phot' + str(filt_index + 1)]
dat_m = data['mag' + str(filt_index + 1)]
dat_me = data['mag_err' + str(filt_index + 1)]
# Make models.
# Decide if we sample the models at a denser time, or just the
# same times as the measurements.
if dense_time:
# 1 day sampling over whole range
mod_t = np.arange(dat_t.min(), dat_t.max(), 1)
else:
mod_t = dat_t
mod_m_out_gp, mod_m_out_std_gp = model.get_photometry_with_gp(dat_t, dat_m, dat_me, filt_index, mod_t)
mod_m_at_dat_gp, mod_m_at_dat_std_gp = model.get_photometry_with_gp(dat_t, dat_m, dat_me, filt_index)
mod_m_out = model.get_photometry(mod_t, filt_index)
mod_m_at_dat = model.get_photometry(dat_t, filt_index)
if mod_m_out_gp is not None:
# Input Model
if input_model != None:
mod_m_in = input_model.get_photometry(mod_t, filt_index)
#####
# Data only
#####
ax1.errorbar(dat_t, dat_m, yerr=dat_me, fmt='k.', alpha=0.2, label='Raw Data')
ax1.set_ylabel('I (mag)')
ax1.invert_yaxis()
ax1.get_xaxis().set_visible(False)
ax1.legend()
#####
# Data minus model (just GP)
#####
ax2.errorbar(dat_t, dat_m - (mod_m_at_dat_gp - mod_m_at_dat), yerr=dat_me, fmt='k.', alpha=0.2, label='Detrended data')
ax2.plot(mod_t, mod_m_out, 'r-', label='Model', lw=1)
ax2.set_ylabel('I (mag)')
ax2.invert_yaxis()
ax2.get_xaxis().set_visible(False)
ax2.legend()
#####
# Data minus GP (just model/detrended data)
#####
ax3.axhline(y=0, color='dimgray', ls=':', alpha=0.8)
ax3.errorbar(dat_t, dat_m - mod_m_at_dat, yerr=dat_me, fmt='k.', alpha=0.2, label='Correlated Noise')
ax3.plot(mod_t, mod_m_out_gp - mod_m_out, 'r-', label='GP', lw=1, zorder=5000)
ax3.set_ylabel('I (mag)')
ax3.invert_yaxis()
ax3.set_xlabel('Time (HJD)')
ax3.legend()
if zoomx is not None:
ax1.set_xlim(zoomx[0], zoomx[1])
ax2.set_xlim(zoomx[0], zoomx[1])
ax3.set_xlim(zoomx[0], zoomx[1])
if zoomy is not None:
ax1.set_ylim(zoomy[0], zoomy[1])
ax2.set_ylim(zoomy[0], zoomy[1])
if zoomy_res is not None:
ax3.set_ylim(zoomy_res[0], zoomy_res[1])
return fig
else:
return None
def plot_astrometry(data, model, input_model=None, dense_time=True,
residuals=True, n_phot_sets=0, filt_index=0, ast_filt_index=0,
mnest_results=None, N_traces=50, fitter=None):
"""Astrometry on the sky
"""
fig_list = []
plt.close(n_phot_sets + 1)
fig = plt.figure(n_phot_sets + 1, figsize=(10, 10)) # PLOT 1
fig_list.append(fig)
plt.clf()
# Get the data out.
dat_x = data['xpos' + str(filt_index + 1)] * 1e3
dat_y = data['ypos' + str(filt_index + 1)] * 1e3
dat_xe = data['xpos_err' + str(filt_index + 1)] * 1e3
dat_ye = data['ypos_err' + str(filt_index + 1)] * 1e3
dat_t = data['t_ast' + str(filt_index + 1)]
if (dat_xe.ndim == 2 and dat_xe.shape[0] == 1):
dat_t = dat_t.reshape(len(dat_t[0]))
dat_x = dat_x.reshape(len(dat_x[0]))
dat_y = dat_y.reshape(len(dat_y[0]))
dat_xe = dat_xe.reshape(len(dat_xe[0]))
dat_ye = dat_ye.reshape(len(dat_ye[0]))
# Data
plt.errorbar(dat_x, dat_y, xerr=dat_xe, yerr=dat_ye,
fmt='k.', label='Data')
# Decide if we sample the models at a denser time, or just the
# same times as the measurements.
if dense_time:
# 1 day sampling over whole range
t_mod = np.arange(dat_t.min(), dat_t.max(), 1)
else:
t_mod = dat_t
# Model - usually from fitter
pos_out = model.get_astrometry(t_mod, ast_filt_idx=ast_filt_index)
plt.plot(pos_out[:, 0] * 1e3, pos_out[:, 1] * 1e3, 'r-', label='Model')
# Input model
if input_model != None:
pos_in = input_model.get_astrometry(t_mod, ast_filt_idx=ast_filt_index)
plt.plot(pos_in[:, 0] * 1e3, pos_in[:, 1] * 1e3, 'g-', label='Input Model')
#####
# Traces
#####
if mnest_results is not None:
idx_arr = np.random.choice(np.arange(len(mnest_results['weights'])),
p=mnest_results['weights'],
size=N_traces)
trace_posxs = []
trace_posys = []
trace_posxs_no_pm = []
trace_posys_no_pm = []
for idx in idx_arr:
trace_mod = fitter.get_model(mnest_results[idx])
trace_pos = trace_mod.get_astrometry(t_mod, ast_filt_idx=ast_filt_index)
trace_pos_no_pm = trace_mod.get_astrometry(t_mod, ast_filt_idx=ast_filt_index) - trace_mod.get_astrometry_unlensed(t_mod)
trace_posxs.append(trace_pos[:, 0] * 1e3)
trace_posys.append(trace_pos[:, 1] * 1e3)
trace_posxs_no_pm.append(trace_pos_no_pm[:, 0] * 1e3)
trace_posys_no_pm.append(trace_pos_no_pm[:, 1] * 1e3)
if mnest_results is not None:
for idx in np.arange(len(idx_arr)):
plt.plot(trace_posxs[idx], trace_posys[idx],
color='c',
alpha=0.5,
linewidth=1,
zorder=-1)
plt.gca().invert_xaxis()
plt.xlabel(r'$\Delta \alpha^*$ (mas)')
plt.ylabel(r'$\Delta \delta$ (mas)')
plt.legend(fontsize=12)
#####
# Astrometry vs. time
# x = RA, y = Dec
#####
plt.close(n_phot_sets + 2)
fig = plt.figure(n_phot_sets + 2, figsize=(10, 10)) # PLOT 2
fig_list.append(fig)
plt.clf()
plt.subplots_adjust(bottom=0.25, left=0.25)
# Decide if we're plotting residuals
if residuals:
f1 = plt.gcf().add_axes([0.15, 0.3, 0.8, 0.6])
f2 = plt.gcf().add_axes([0.15, 0.1, 0.8, 0.2])
else:
plt.gca()
f1.errorbar(dat_t, dat_x, yerr=dat_xe, fmt='k.', label='Data')
f1.plot(t_mod, pos_out[:, 0] * 1e3, 'r-', label='Model')
if input_model != None:
f1.plot(t_mod, pos_in[:, 0] * 1e3, 'g-', label='Input Model')
f1.set_xlabel('t - t0 (days)')
f1.set_ylabel(r'$\Delta \alpha^*$ (mas)')
f1.legend()
# Decide if plotting traces
if mnest_results is not None:
for idx in np.arange(len(idx_arr)):
f1.plot(t_mod, trace_posxs[idx],
color='c',
alpha=0.5,
linewidth=1,
zorder=-1)
if residuals:
f1.get_xaxis().set_visible(False)
f1.get_shared_x_axes().join(f1, f2)
f2.errorbar(dat_t, dat_x - model.get_astrometry(dat_t, ast_filt_idx=ast_filt_index)[:,0] * 1e3,
yerr=dat_xe, fmt='k.', alpha=0.2)
f2.axhline(0, linestyle='--', color='r')
f2.set_xlabel('Time (HJD)')
f2.set_ylabel('Obs - Mod')
plt.close(n_phot_sets + 3)
fig = plt.figure(n_phot_sets + 3, figsize=(10, 10)) # PLOT 3
fig_list.append(fig)
plt.clf()
plt.subplots_adjust(bottom=0.25, left=0.25)
# Decide if we're plotting residuals
if residuals:
f1 = plt.gcf().add_axes([0.15, 0.3, 0.8, 0.6])
f2 = plt.gcf().add_axes([0.15, 0.1, 0.8, 0.2])
else:
plt.gca()
f1.errorbar(dat_t, dat_y, yerr=dat_ye, fmt='k.', label='Data')
f1.plot(t_mod, pos_out[:, 1] * 1e3, 'r-', label='Model')
if input_model != None:
f1.plot(t_mod, pos_in[:, 1] * 1e3, 'g-', label='Input')
f1.set_xlabel('t - t0 (days)')
f1.set_ylabel(r'$\Delta \delta$ (mas)')
f1.legend()
# Decide if plotting traces
if mnest_results is not None:
for idx in np.arange(len(idx_arr)):
f1.plot(t_mod, trace_posys[idx],
color='c',
alpha=0.5,
linewidth=1,
zorder=-1)
if residuals:
f1.get_xaxis().set_visible(False)
f1.get_shared_x_axes().join(f1, f2)
f2.errorbar(dat_t,
dat_y - model.get_astrometry(dat_t, ast_filt_idx=ast_filt_index)[:,1] * 1e3,
yerr=dat_ye, fmt='k.', alpha=0.2)
f2.axhline(0, linestyle='--', color='r')
f2.set_xlabel('Time (HJD)')
f2.set_ylabel('Obs - Mod')
#####
# Remove the unlensed motion (proper motion)
# astrometry vs. time
#####
# Make the model unlensed points.
p_mod_unlens_tdat = model.get_astrometry_unlensed(dat_t)
x_mod_tdat = p_mod_unlens_tdat[:, 0]
y_mod_tdat = p_mod_unlens_tdat[:, 1]
x_no_pm = data['xpos' + str(filt_index + 1)] - x_mod_tdat
y_no_pm = data['ypos' + str(filt_index + 1)] - y_mod_tdat
# Make the dense sampled model for the same plot
dp_tmod_unlens = model.get_astrometry(t_mod, ast_filt_idx=ast_filt_index) - model.get_astrometry_unlensed(t_mod)
x_mod_no_pm = dp_tmod_unlens[:, 0]
y_mod_no_pm = dp_tmod_unlens[:, 1]
# Long time
# baseline = np.max((2*(dat_t.max() - dat_t.min()), 5*model.tE))
# longtime = np.arange(model.t0 - baseline, model.t0 + baseline, 1)
baseline = 3*(dat_t.max() - dat_t.min())
longtime = np.arange(t_mod.min()-baseline, t_mod.max()+baseline, 1)
dp_tmod_unlens_longtime = model.get_astrometry(longtime) - model.get_astrometry_unlensed(longtime)
x_mod_no_pm_longtime = dp_tmod_unlens_longtime[:, 0]
y_mod_no_pm_longtime = dp_tmod_unlens_longtime[:, 1]
# Make the dense sampled model for the same plot for INPUT model
if input_model != None:
dp_tmod_unlens_in = input_model.get_astrometry(t_mod, ast_filt_idx=ast_filt_index) - input_model.get_astrometry_unlensed(t_mod)
x_mod_no_pm_in = dp_tmod_unlens_in[:, 0]
y_mod_no_pm_in = dp_tmod_unlens_in[:, 1]
if (x_no_pm.ndim == 2 and x_no_pm.shape[0] == 1):
x_no_pm = x_no_pm.reshape(len(x_no_pm[0]))
y_no_pm = y_no_pm.reshape(len(y_no_pm[0]))
# Prep some colorbar stuff
cmap = plt.cm.viridis
norm = plt.Normalize(vmin=dat_t.min(), vmax=dat_t.max())
smap = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
smap.set_array([])
plt.close(n_phot_sets + 4)
fig = plt.figure(n_phot_sets + 4, figsize=(10, 10)) # PLOT 4
fig_list.append(fig)
plt.clf()
plt.errorbar(dat_t, x_no_pm * 1e3,
yerr=dat_xe, fmt='k.', label='Data')
plt.plot(t_mod, x_mod_no_pm * 1e3, 'r-', label='Model')
if mnest_results is not None:
for idx in np.arange(len(idx_arr)):
plt.plot(t_mod, trace_posxs_no_pm[idx],
color='c',
alpha=0.5,
linewidth=1,
zorder=-1)
if input_model != None:
plt.plot(t_mod, x_mod_no_pm_in * 1e3, 'g-', label='Input')
plt.xlabel('t - t0 (days)')
plt.ylabel(r'$\Delta \alpha^*$ (mas)')
plt.legend()
plt.close(n_phot_sets + 5)
fig = plt.figure(n_phot_sets + 5, figsize=(10, 10)) # PLOT 5
fig_list.append(fig)
plt.clf()
plt.errorbar(dat_t, y_no_pm * 1e3,
yerr=dat_ye, fmt='k.', label='Data')
plt.plot(t_mod, y_mod_no_pm * 1e3, 'r-', label='Model')
if mnest_results is not None:
for idx in np.arange(len(idx_arr)):
plt.plot(t_mod, trace_posys_no_pm[idx],
color='c',
alpha=0.5,
linewidth=1,
zorder=-1)
if input_model != None:
plt.plot(t_mod, y_mod_no_pm_in * 1e3, 'g-', label='Input')
plt.xlabel('t - t0 (days)')
plt.ylabel(r'$\Delta \delta$ (mas)')
plt.legend()
plt.close(n_phot_sets + 6)
fig = plt.figure(n_phot_sets + 6) # PLOT 6
fig_list.append(fig)
plt.clf()
plt.scatter(x_no_pm * 1e3, y_no_pm * 1e3, c=dat_t,
cmap=cmap, norm=norm, s=5)
plt.errorbar(x_no_pm * 1e3, y_no_pm * 1e3,
xerr=dat_xe, yerr=dat_ye,
fmt='none', ecolor=smap.to_rgba(dat_t))
plt.scatter(x_mod_no_pm * 1e3, y_mod_no_pm * 1e3, c=t_mod, cmap=cmap,
norm=norm)
if mnest_results is not None:
for idx in np.arange(len(idx_arr)):
plt.plot(trace_posxs_no_pm[idx], trace_posys_no_pm[idx],
color='c',
alpha=0.5,
linewidth=1,
zorder=-1)
plt.gca().invert_xaxis()
plt.axis('equal')
plt.xlabel(r'$\Delta \alpha^*$ (mas)')
plt.ylabel(r'$\Delta \delta$ (mas)')
plt.colorbar()
#####
# Astrometry on the sky
#####
plt.close(n_phot_sets + 7)
fig = plt.figure(n_phot_sets + 7, figsize=(10, 10)) # PLOT 7
fig_list.append(fig)
plt.clf()
# Data
plt.errorbar(dat_x, dat_y,
xerr=dat_xe, yerr=dat_ye,
fmt='k.', label='Data')
# Decide if we sample the models at a denser time, or just the
# same times as the measurements.
if dense_time:
# 1 day sampling over whole range
t_mod = np.arange(dat_t.min(), dat_t.max(), 1)
else:
t_mod = dat_t
# Model - usually from fitter
pos_out = model.get_astrometry(t_mod)
pos_out_unlens = model.get_astrometry_unlensed(longtime)
plt.plot(pos_out[:, 0] * 1e3, pos_out[:, 1] * 1e3, 'r-', label='Model')
plt.plot(pos_out_unlens[:, 0] * 1e3, pos_out_unlens[:, 1] * 1e3, 'b:', label='Model unlensed')
# Input model
if input_model != None:
pos_in = input_model.get_astrometry(t_mod)
plt.plot(pos_in[:, 0] * 1e3, pos_in[:, 1] * 1e3, 'g-', label='Input Model')
if mnest_results is not None:
for idx in np.arange(len(idx_arr)):
plt.plot(trace_posxs[idx], trace_posys[idx],
color='c',
alpha=0.5,
linewidth=1,
zorder=-1)
plt.gca().invert_xaxis()
plt.xlabel(r'$\Delta \alpha^*$ (mas)')
plt.ylabel(r'$\Delta \delta$ (mas)')
plt.legend(fontsize=12)
plt.close(n_phot_sets + 8)
fig = plt.figure(n_phot_sets + 8, figsize=(10, 10)) # PLOT 8
fig_list.append(fig)
plt.clf()
plt.errorbar(dat_t, x_no_pm * 1e3,
yerr=dat_xe, fmt='k.', label='Data')
plt.plot(longtime, x_mod_no_pm_longtime * 1e3, 'r-', label='Model')
plt.xlabel('t - t0 (days)')
plt.ylabel(r'$\Delta \alpha^*$ (mas)')
plt.legend()
if mnest_results is not None:
for idx in np.arange(len(idx_arr)):
plt.plot(t_mod, trace_posxs_no_pm[idx],
color='c',
alpha=0.5,
linewidth=1,
zorder=-1)
plt.close(n_phot_sets + 9)
fig = plt.figure(n_phot_sets + 9, figsize=(10, 10)) # PLOT 9
fig_list.append(fig)
plt.clf()
plt.errorbar(dat_t, y_no_pm * 1e3,
yerr=dat_ye, fmt='k.', label='Data')
plt.plot(longtime, y_mod_no_pm_longtime * 1e3, 'r-', label='Model')
plt.xlabel('t - t0 (days)')
plt.ylabel(r'$\Delta \delta$ (mas)')
plt.legend()
if mnest_results is not None:
for idx in np.arange(len(idx_arr)):
plt.plot(t_mod, trace_posys_no_pm[idx],
color='c',
alpha=0.5,
linewidth=1,
zorder=-1)
# Prep some colorbar stuff
cmap = plt.cm.viridis
norm = plt.Normalize(vmin=dat_t.min(), vmax=dat_t.max())
smap = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
smap.set_array([])
plt.close(n_phot_sets + 10)
fig = plt.figure(n_phot_sets + 10) # PLOT 10
fig_list.append(fig)
plt.clf()
plt.scatter(x_no_pm * 1e3, y_no_pm * 1e3, c=dat_t,
cmap=cmap, norm=norm, s=5)
plt.errorbar(x_no_pm * 1e3, y_no_pm * 1e3,
xerr=dat_xe, yerr=dat_ye,
fmt='none', ecolor=smap.to_rgba(dat_t))
plt.colorbar()
plt.scatter(x_mod_no_pm_longtime * 1e3, y_mod_no_pm_longtime * 1e3, s=1)
# c=longtime, cmap=cmap, norm=norm, s=1)
if mnest_results is not None:
for idx in np.arange(len(idx_arr)):
plt.plot(trace_posxs_no_pm[idx], trace_posys_no_pm[idx],
color='c',
alpha=0.5,
linewidth=1,
zorder=-1)
plt.gca().invert_xaxis()
plt.axis('equal')
plt.xlabel(r'$\Delta \alpha^*$ (mas)')
plt.ylabel(r'$\Delta \delta$ (mas)')
return fig_list
def plot_astrometry_on_sky(data, model):
t_mod = np.arange(data['t_ast1'].min() - 300.0, data['t_ast1'].max() + 300.0, 5.0)
pos_out = model.get_astrometry(t_mod)
pos_in = model.get_astrometry_unlensed(t_mod)
lens_in = model.get_lens_astrometry(t_mod)
plt.close(1)
fig = plt.figure(1, figsize=(16, 4))
plt.subplots_adjust(wspace=0.5, top=0.90)
ast_colors = ['maroon', 'navy', 'purple', 'steelblue']
# Plot the data: RA vs. time
plt.subplot(131)
for ii in range(len(data['ast_data'])):
suff = str(ii+1)
plt.errorbar(data['t_ast'+suff], data['xpos'+suff]*1e3,
yerr=data['xpos_err'+suff]*1e3,
marker='.', color=ast_colors[ii], ls='none',
label=data['ast_data'][ii])
plt.plot(t_mod, pos_out[:, 0]*1e3, 'r-', label='Src-Lensed')
plt.plot(t_mod, pos_in[:, 0]*1e3, 'r--', label='Src-Unlensed')
plt.plot(t_mod, lens_in[:, 0]*1e3, 'k-.', label='Lens')
plt.xlabel('Time (MJD)')
plt.ylabel(r'$\Delta \alpha$ (mas)')
# plt.ylim(228, 233)
fig.legend(loc='lower center', ncol=5, bbox_to_anchor=(0.55, 0.95))
# Plot the data: Dec vs. time
plt.subplot(132)
for ii in range(len(data['ast_data'])):
suff = str(ii+1)
plt.errorbar(data['t_ast'+suff], data['ypos'+suff]*1e3,
yerr=data['ypos_err'+suff]*1e3,
marker='.', color=ast_colors[ii], ls='none')
plt.plot(t_mod, pos_out[:, 1]*1e3, 'r-')
plt.plot(t_mod, pos_in[:, 1]*1e3, 'r--')
plt.plot(t_mod, lens_in[:, 1]*1e3, 'k-.')
plt.ylabel(r'$\Delta \delta$ (mas)')
plt.xlabel('Time (MJD)')
# Plot the data: Dec vs. time
plt.subplot(133)
for ii in range(len(data['ast_data'])):
suff = str(ii+1)
plt.errorbar(data['xpos'+suff]*1e3, data['ypos'+suff]*1e3,
xerr=data['xpos_err'+suff]*1e3, yerr=data['ypos_err'+suff]*1e3,
marker='.', color=ast_colors[ii], ls='none')
plt.plot(pos_out[:, 0]*1e3, pos_out[:, 1]*1e3, 'r-')
plt.plot(pos_in[:, 0]*1e3, pos_in[:, 1]*1e3, 'r--')
plt.plot(lens_in[:, 0]*1e3, lens_in[:, 1]*1e3, 'k-.')
plt.xlabel(r'$\Delta \alpha$ (mas)')
plt.ylabel(r'$\Delta \delta$ (mas)')
plt.gca().invert_xaxis()
plt.axis('equal')
return fig
def plot_astrometry_proper_motion_removed(data, model):
"""Proper Motion Subtracted
"""
t_mod = np.arange(data['t_ast1'].min() - 300.0, data['t_ast1'].max() + 300.0, 5.0)
pos_out = model.get_astrometry(t_mod)
pos_in = model.get_astrometry_unlensed(t_mod)
lens_in = model.get_lens_astrometry(t_mod)
pos_out -= model.muS[np.newaxis, :] * 1e-3 * (t_mod[:, np.newaxis] - model.t0) / 365.25
pos_in -= model.muS[np.newaxis, :] * 1e-3 * (t_mod[:, np.newaxis] - model.t0) / 365.25
lens_in -= model.muS[np.newaxis, :] * 1e-3 * (t_mod[:, np.newaxis] - model.t0) / 365.25
plt.close('all')
fig = plt.figure(figsize=(16, 4))
plt.subplots_adjust(wspace=0.5, top=0.90)
ast_colors = ['maroon', 'navy', 'purple']
# Plot the data: RA vs. time
plt.subplot(131)
for ii in range(len(data['ast_data'])):
suff = str(ii+1)
x = data['xpos'+suff] - (model.muS[0] * 1e-3 * (data['t_ast'+suff] - model.t0) / 365.25)
plt.errorbar(data['t_ast'+suff], x*1e3,
yerr=data['xpos_err'+suff]*1e3,
marker='.', color=ast_colors[ii],
ls='none', label=data['ast_data'][ii])
plt.plot(t_mod, pos_out[:, 0]*1e3, 'r-', label='Src-Lensed')
plt.plot(t_mod, pos_in[:, 0]*1e3, ls='--', color='orange', label='Src-Unlensed')
plt.plot(t_mod, lens_in[:, 0]*1e3, 'k-.', label='Lens')
plt.xlabel('Time (MJD)')
plt.ylabel(r'$\Delta \alpha$ - PM (mas)')
plt.ylim(x.min()*1e3-2, x.max()*1e3+2)
fig.legend(loc='lower center', ncol=5, bbox_to_anchor=(0.55, 0.95))
# Plot the data: Dec vs. time
plt.subplot(132)
for ii in range(len(data['ast_data'])):
suff = str(ii+1)
y = data['ypos'+suff] - (model.muS[1] * 1e-3 * (data['t_ast'+suff] - model.t0) / 365.25)
plt.errorbar(data['t_ast'+suff], y*1e3,
yerr=data['ypos_err'+suff]*1e3,
marker='.', color=ast_colors[ii], ls='none')
plt.plot(t_mod, pos_out[:, 1]*1e3, 'r-')
plt.plot(t_mod, pos_in[:, 1]*1e3, ls='--', color='orange')
plt.plot(t_mod, lens_in[:, 1]*1e3, 'k-.')
plt.ylabel(r'$\Delta \delta$ - PM (mas)')
plt.xlabel('Time (MJD)')
plt.ylim(y.min()*1e3-2, y.max()*1e3+2)
# Plot the data: Dec vs. time
plt.subplot(133)
for ii in range(len(data['ast_data'])):
suff = str(ii+1)
x = data['xpos'+suff] - (model.muS[0] * 1e-3 * (data['t_ast'+suff] - model.t0) / 365.25)
y = data['ypos'+suff] - (model.muS[1] * 1e-3 * (data['t_ast'+suff] - model.t0) / 365.25)
plt.errorbar(x*1e3, y*1e3,
xerr=data['xpos_err'+suff]*1e3, yerr=data['ypos_err'+suff]*1e3,
marker='.', color=ast_colors[ii], ls='none')
plt.plot(pos_out[:, 0]*1e3, pos_out[:, 1]*1e3, 'r-')
plt.plot(pos_in[:, 0]*1e3, pos_in[:, 1]*1e3, ls='--', color='orange')
plt.plot(lens_in[:, 0]*1e3, lens_in[:, 1]*1e3, 'k-.')
plt.xlabel(r'$\Delta \alpha$ - PM (mas)')
plt.ylabel(r'$\Delta \delta$ - PM (mas)')
plt.axis('equal')
plt.xlim(x.min()*1e3-1, x.max()*1e3+1)
plt.ylim(y.min()*1e3-1, y.max()*1e3+1)
plt.gca().invert_xaxis()
return fig
def quantiles(mnest_results, sigma=1):
"""
Calculate the median and N sigma credible interval.
Parameters
----------
mnest_results : astropy table
The table that comes out of load_mnest_results.
sigma : int, optional
1, 2, or 3 sigma to determine which credible interval
to return.
"""
pars = mnest_results.colnames
weights = mnest_results['weights']
sumweights = np.sum(weights)
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 30 19:00:00 2017
@author: gsutanto
"""
import numpy as np
import tensorflow as tf
import random
import os
import sys
import copy
import time
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import glob
from colorama import init, Fore, Back, Style
init()
sys.path.append(os.path.join(os.path.dirname(__file__), '../../../utilities/'))
sys.path.append(os.path.join(os.path.dirname(__file__), '../../../neural_nets/feedforward/'))
from utilities import *
from FeedForwardNeuralNetwork import *
# Seed the random variables generator:
random.seed(38)
np.random.seed(38)
def generate1stOrderTimeAffineDynSys(init_val, dt, tau, alpha):
traj_length = int(round(tau/dt))
traj = np.zeros(traj_length)
x = init_val
for t in range(traj_length):
traj[t] = x
x = (1 - ((dt/tau) * alpha)) * x
return traj
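# Sanity-check sketch: the loop above is an explicit-Euler discretization of
# dx/dt = -(alpha/tau) * x, so the generated trajectory should stay close to the
# analytic solution x(t) = init_val * exp(-alpha * t / tau) for small dt.
def checkFirstOrderDynSysAgainstAnalytic(init_val=1.0, dt=1/300.0, tau=0.5, alpha=25.0/3.0):
    traj = generate1stOrderTimeAffineDynSys(init_val, dt, tau, alpha)
    t = np.arange(len(traj)) * dt
    analytic = init_val * np.exp(-alpha * t / tau)
    return np.max(np.abs(traj - analytic))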
if __name__ == "__main__":
plt.close('all')
base_tau = 0.5
dt = 1/300.0
alpha = 25.0/3.0 # similar to (1st order) canonical system's alpha
plt_color_code = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
N_traj = len(plt_color_code)
stretched_traj_length = int(round(base_tau/dt * (N_traj + 1)/2))
trajs = [None] * N_traj
data_output = [None] * N_traj
data_input = [None] * N_traj
ax_plt = [None] * 2
plt_label = [None] * 2
fig, (ax_plt[0], ax_plt[1]) = plt.subplots(2, sharex=True, sharey=True)
for i in range(N_traj):
tau = (i+1) * base_tau
traj = generate1stOrderTimeAffineDynSys(1.0, dt, tau, alpha)
trajs[i] = traj
data_output[i] = traj[1:] - traj[:-1] # = C_{t} - C_{t-1}
data_input[i] = ((dt/tau) * traj[:-1]) # (dt/tau) * C_{t-1}
plt_label[0] = 'traj ' + str(i+1) + ', tau=' + str(tau) + 's'
plt_label[1] = 'traj ' + str(i+1)
stretched_traj = stretchTrajectory(traj, stretched_traj_length)
ax_plt[0].plot(traj,
color=plt_color_code[i],
label=plt_label[0])
ax_plt[1].plot(stretched_traj,
color=plt_color_code[i],
label=plt_label[1])
ax_plt[0].set_title('Unstretched vs Stretched Trajectories of 1st Order Time-Affine Dynamical Systems, dt=(1/' + str(1.0/dt) + ')s')
ax_plt[0].set_ylabel('Unstretched')
ax_plt[1].set_ylabel('Stretched')
ax_plt[1].set_xlabel('Time Index')
for p in range(2):
ax_plt[p].legend()
# Fine-tune figure; make subplots close to each other and hide x ticks for
# all but bottom plot.
# f.subplots_adjust(hspace=0)
plt.setp([a.get_xticklabels() for a in fig.axes[:-1]], visible=False)
X = np.vstack([di.reshape(di.shape[0],1) for di in data_input]).astype(np.float32)
Y = np.vstack([do.reshape(do.shape[0],1) for do in data_output]).astype(np.float32)
N_NN_reinit_trials = 1
batch_size = 128
TF_max_train_iters = 100001
# Dropouts:
tf_train_dropout_keep_prob = 0.5
# L2 Regularization Constant
beta = 0.0
logs_path = "/tmp/ffnn/"
NN_name = 'my_ffnn'
fraction_train_dataset = 0.85
fraction_test_dataset = 0.075
chunk_size = 1
# Initial Learning Rate
init_learning_rate = 0.001
# Define Neural Network Topology
topology = [1, 1]
# Define Neural Network Activation Function
ffnn_hidden_layer_activation_func_list = []
N_data = X.shape[0]
D_input = X.shape[1]
D_output = Y.shape[1]
print('N_data =', N_data)
print('D_input =', D_input)
print('D_output =', D_output)
# Permutation with Chunks (for Stochastic Gradient Descent (SGD))
data_idx_chunks = list(chunks(range(N_data), chunk_size))
N_chunks = len(data_idx_chunks)
N_train_chunks = np.round(fraction_train_dataset * N_chunks).astype(int)
N_test_chunks = np.round(fraction_test_dataset * N_chunks).astype(int)
N_valid_chunks = N_chunks - N_train_chunks - N_test_chunks
chunk_permutation = np.random.permutation(N_chunks)
chunk_idx_train = np.sort(chunk_permutation[0:N_train_chunks], 0)
chunk_idx_valid = np.sort(chunk_permutation[N_train_chunks:(N_train_chunks+N_valid_chunks)], 0)
chunk_idx_test = np.sort(chunk_permutation[(N_train_chunks+N_valid_chunks):N_chunks], 0)
idx_train_dataset = np.concatenate([data_idx_chunks[i] for i in chunk_idx_train])
idx_valid_dataset = np.concatenate([data_idx_chunks[i] for i in chunk_idx_valid])
idx_test_dataset = np.concatenate([data_idx_chunks[i] for i in chunk_idx_test])
# Training Dataset Index is Permuted for Stochastic Gradient Descent (SGD)
permuted_idx_train_dataset = idx_train_dataset[np.random.permutation(len(idx_train_dataset))]
assert (((set(permuted_idx_train_dataset).union(set(idx_valid_dataset))).union(set(idx_test_dataset))) == set(np.arange(N_data))), "NOT all data is utilized!"
X_train = X[permuted_idx_train_dataset,:]
X_valid = X[idx_valid_dataset,:]
X_test = X[idx_test_dataset,:]
Y_train = Y[permuted_idx_train_dataset,:]
Y_valid = Y[idx_valid_dataset,:]
Y_test = Y[idx_test_dataset,:]
N_train_dataset = X_train.shape[0]
N_valid_dataset = X_valid.shape[0]
N_test_dataset = X_test.shape[0]
print('N_train_dataset =', N_train_dataset)
print('N_valid_dataset =', N_valid_dataset)
print('N_test_dataset =', N_test_dataset)
# Build the complete graph for feeding inputs, training, and saving checkpoints.
ff_nn_graph = tf.Graph()
with ff_nn_graph.as_default():
# Input data. For the training data, we use a placeholder that will be fed
# at run time with a training minibatch.
tf_train_X_batch = tf.placeholder(tf.float32, shape=[batch_size, D_input], name="tf_train_X_batch_placeholder")
tf_train_X = tf.constant(X_train, name="tf_train_X_constant")
tf_valid_X = tf.constant(X_valid, name="tf_valid_X_constant")
tf_test_X = tf.constant(X_test, name="tf_test_X_constant")
tf_train_Y_batch = tf.placeholder(tf.float32, shape=[batch_size, D_output], name="tf_train_Y_batch_placeholder")
tf_train_Y = tf.constant(Y_train, name="tf_train_Y_constant")
tf_valid_Y = tf.constant(Y_valid, name="tf_valid_Y_constant")
tf_test_Y = tf.constant(Y_test, name="tf_test_Y_constant")
FFNN = FeedForwardNeuralNetwork(NN_name, topology, ffnn_hidden_layer_activation_func_list)
# Build the Prediction Graph (that computes predictions from the inference model).
train_batch_prediction = FFNN.performNeuralNetworkPrediction(tf_train_X_batch, tf_train_dropout_keep_prob)
# Build the Training Graph (that calculate and apply gradients).
train_op, loss = FFNN.performNeuralNetworkTraining(train_batch_prediction, tf_train_Y_batch,
init_learning_rate, beta)
# Create a summary:
tf.summary.scalar("loss", loss)
# merge all summaries into a single "operation" which we can execute in a session
summary_op = tf.summary.merge_all()
# Predictions for the training, validation, and test data.
train_prediction = FFNN.performNeuralNetworkPrediction(tf_train_X, 1.0)
valid_prediction = FFNN.performNeuralNetworkPrediction(tf_valid_X, 1.0)
test_prediction = FFNN.performNeuralNetworkPrediction(tf_test_X, 1.0)
# Run training for TF_max_train_iters and save checkpoint at the end.
with tf.Session(graph=ff_nn_graph) as session:
for n_NN_reinit_trial in range(N_NN_reinit_trials):
print ("n_NN_reinit_trial = ", n_NN_reinit_trial)
# Run the Op to initialize the variables.
tf.global_variables_initializer().run()
print("Initialized")
if (FFNN.num_params < N_train_dataset):
print("OK: FFNN.num_params=%d < %d=N_train_dataset" % (FFNN.num_params, N_train_dataset))
else:
print(Fore.RED + "WARNING: FFNN.num_params=%d >= %d=N_train_dataset" % (FFNN.num_params, N_train_dataset))
print(Style.RESET_ALL)
# sys.exit("ERROR: FFNN.num_params=%d >= %d=N_train_dataset" % (FFNN.num_params, N_train_dataset))
# create log writer object
writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())
np.set_printoptions(suppress=True)
np.set_printoptions(precision=4)
from multiprocessing import Pool
import platform
import warnings
import numpy as np
from mogp_emulator.GaussianProcess import (
GaussianProcessBase,
GaussianProcess,
PredictResult
)
from mogp_emulator.Kernel import KernelBase
from mogp_emulator.Priors import GPPriors
class MultiOutputGP(object):
"""Implementation of a multiple-output Gaussian Process Emulator.
Essentially a parallelized wrapper for the predict method. To fit
in parallel, use the ``fit_GP_MAP`` routine
Required arguments are ``inputs`` and ``targets``, both of which
must be numpy arrays. ``inputs`` can be 1D or 2D (if 1D, assumes
second axis has length 1). ``targets`` can be 1D or 2D (if 2D,
assumes a single emulator and the first axis has length 1).
Optional arguments specify how each individual emulator is
constructed, including the mean function, kernel, priors, and how
to handle the nugget. Each argument can take values allowed by the
base ``GaussianProcess`` class, in which case all emulators are
assumed to use the same value. Any of these arguments can
alternatively be a list of values with length matching the number
of emulators to set those values individually.
"""
def __init__(self, inputs, targets, mean=None, kernel="SquaredExponential", priors=None,
nugget="adaptive", inputdict={}, use_patsy=True):
"""
Create a new multi-output GP Emulator
"""
self.GPClass = GaussianProcess
if not inputdict == {}:
warnings.warn("The inputdict interface for mean functions has been deprecated. " +
"You must input your mean formulae using the x[0] format directly " +
"in the formula.", DeprecationWarning)
if not use_patsy:
warnings.warn("patsy is now required to parse all formulae and form design " +
"matrices in mogp-emulator. The use_patsy=False option will be ignored.")
# check input types and shapes, reshape as appropriate for the case of a single emulator
inputs = np.array(inputs)
targets = np.array(targets)
if len(inputs.shape) == 1:
inputs = np.reshape(inputs, (-1, 1))
if len(targets.shape) == 1:
targets = np.reshape(targets, (1, -1))
elif not (len(targets.shape) == 2):
raise ValueError("targets must be either a 1D or 2D array")
if not (len(inputs.shape) == 2):
raise ValueError("inputs must be either a 1D or 2D array")
if not (inputs.shape[0] == targets.shape[1]):
raise ValueError("the first dimension of inputs must be the same length as the second dimension of targets (or first if targets is 1D))")
self.n_emulators = targets.shape[0]
self.n = inputs.shape[0]
self.D = inputs.shape[1]
if not isinstance(mean, list):
mean = self.n_emulators*[mean]
assert isinstance(mean, list), "mean must be None, a string, a valid patsy model description, or a list of None/string/mean functions"
assert len(mean) == self.n_emulators
if isinstance(kernel, str) or issubclass(type(kernel), KernelBase):
kernel = self.n_emulators*[kernel]
assert isinstance(kernel, list), "kernel must be a Kernel subclass or a list of Kernel subclasses"
assert len(kernel) == self.n_emulators
if isinstance(priors, (GPPriors, dict)) or priors is None:
priorslist = self.n_emulators*[priors]
else:
priorslist = list(priors)
assert isinstance(priorslist, list), ("priors must be a GPPriors object, None, or arguments to construct " +
"a GPPriors object or a list of length n_emulators containing the above")
assert len(priorslist) == self.n_emulators, "Bad length for list provided for priors to MultiOutputGP"
if isinstance(nugget, (str, float)):
nugget = self.n_emulators*[nugget]
assert isinstance(nugget, list), "nugget must be a string, float, or a list of strings and floats"
assert len(nugget) == self.n_emulators
self.emulators = [ self.GPClass(inputs, single_target, m, k, p, n)
for (single_target, m, k, p, n) in zip(targets, mean, kernel, priorslist, nugget)]
def predict(self, testing, unc=True, deriv=False, include_nugget=True,
allow_not_fit=False, processes=None):
"""Make a prediction for a set of input vectors
Makes predictions for each of the emulators on a given set of
input vectors. The input vectors must be passed as a
``(n_predict, D)`` or ``(D,)`` shaped array-like object, where
``n_predict`` is the number of different prediction points
under consideration and ``D`` is the number of inputs to the
emulator. If the prediction inputs array has shape ``(D,)``,
then the method assumes ``n_predict == 1``. The prediction
points are passed to each emulator and the predictions are
collected into an ``(n_emulators, n_predict)`` shaped numpy
array as the first return value from the method.
Optionally, the emulator can also calculate the uncertainties
in the predictions (as a variance) and the derivatives with
respect to each input parameter. If the uncertainties are
computed, they are returned as the second output from the
method as an ``(n_emulators, n_predict)`` shaped numpy
array. If the derivatives are computed, they are returned as
the third output from the method as an ``(n_emulators,
n_predict, D)`` shaped numpy array. Finally, if uncertainties
are computed, the ``include_nugget`` flag determines if the
uncertainties should include the nugget. By default, this is
set to ``True``.
Derivatives have been deprecated due to changes in how the
mean function is computed, so setting ``deriv=True`` will
have no effect and will raise a ``DeprecationWarning``.
The ``allow_not_fit`` flag determines how the object handles
any emulators that do not have fit hyperparameter values
(because fitting presumably failed). By default,
``allow_not_fit=False`` and the method will raise an error
if any emulators are not fit. Passing ``allow_not_fit=True``
will override this and ``NaN`` will be returned from any
emulators that have not been fit.
As with the fitting, this computation can be done
independently for each emulator and thus can be done in
parallel.
:param testing: Array-like object holding the points where
predictions will be made. Must have shape
``(n_predict, D)`` or ``(D,)`` (for a single
prediction)
:type testing: ndarray
:param unc: (optional) Flag indicating if the uncertainties
are to be computed. If ``False`` the method
returns ``None`` in place of the uncertainty
array. Default value is ``True``.
:type unc: bool
:param include_nugget: (optional) Flag indicating if the
nugget should be included in the
predictive variance. Only relevant if
``unc = True``. Default is ``True``.
:type include_nugget: bool
:param allow_not_fit: (optional) Flag that allows predictions
to be made even if not all emulators have
been fit. Default is ``False`` which
will raise an error if any unfitted
emulators are present.
:type allow_not_fit: bool
:param processes: (optional) Number of processes to use when
making the predictions. Must be a positive
integer or ``None`` to use the number of
processors on the computer (default is
``None``)
:type processes: int or None
:returns: ``PredictResult`` object holding numpy arrays
containing the predictions, uncertainties, and
derivatives, respectively. Predictions and
uncertainties have shape ``(n_emulators,
n_predict)`` while the derivatives have shape
``(n_emulators, n_predict, D)``. If the ``do_unc``
or ``do_deriv`` flags are set to ``False``, then
those arrays are replaced by ``None``.
:rtype: PredictResult
"""
testing = np.array(testing)
if self.D == 1 and testing.ndim == 1:
testing = np.reshape(testing, (-1, 1))
elif testing.ndim == 1:
testing = np.reshape(testing, (1, len(testing)))
assert testing.ndim == 2, "testing must be a 2D array"
n_testing, D = np.shape(testing)
assert D == self.D, "second dimension of testing must be the same as the number of input parameters"
if deriv:
warnings.warn("Prediction derivatives have been deprecated and are no longer supported",
DeprecationWarning)
if not processes is None:
processes = int(processes)
assert processes > 0, "number of processes must be a positive integer"
if allow_not_fit:
predict_method = _gp_predict_default_NaN
else:
predict_method = self.GPClass.predict
if platform.system() == "Windows":
predict_vals = [predict_method(gp, testing, unc, deriv, include_nugget)
for gp in self.emulators]
else:
with Pool(processes) as p:
predict_vals = p.starmap(predict_method,
[(gp, testing, unc, deriv, include_nugget)
for gp in self.emulators])
# repackage predictions into numpy arrays
predict_unpacked, unc_unpacked, deriv_unpacked = [np.array(t) for t in zip(*predict_vals)]
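# Usage sketch (not part of this module; the fit_GP_MAP import path is assumed from
# the mogp_emulator package): build a MultiOutputGP from shared inputs and one target
# row per emulator, fit it, then predict. Per the docstring above, the returned
# PredictResult holds (n_emulators, n_predict)-shaped mean and uncertainty arrays.
def _example_multi_output_gp():
    from mogp_emulator import fit_GP_MAP
    x = np.linspace(0., 1., 20).reshape(-1, 1)          # 20 input points, D = 1
    y = np.vstack([np.sin(x[:, 0]), np.cos(x[:, 0])])   # 2 emulators x 20 targets
    mogp = MultiOutputGP(x, y, nugget="adaptive")
    mogp = fit_GP_MAP(mogp)                              # fits each emulator
    return mogp.predict(np.array([[0.25], [0.75]]), unc=True)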
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Python 2.7
@author: <NAME>
<EMAIL>
Last Update: 23.8.2018
Use Generative Model for posture extrapolation
"""
from datetime import datetime
import os, sys, numpy as np, argparse
from time import time
from tqdm import tqdm, trange
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
#from skimage.transform import resize
from skimage import measure
from scipy.io import loadmat
from scipy.misc import imread
from scipy.spatial.distance import euclidean, cdist
from sklearn.preprocessing import normalize
try:
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
except:
from sklearn.lda import LDA
from sklearn.svm import LinearSVC
from utils import load_table, load_features, draw_border, fig2data, load_image
sys.path.append('./magnification/')
from Generator import Generator, find_differences, find_differences_cc
#import config_pytorch as cfg
import config_pytorch_human as cfg
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--length",type=int,default=8,
help="Sequence length")
parser.add_argument("-q", "--query",type=int,default=10,
help="Frame query")
parser.add_argument("-nn", "--nn",type=int,default=30,
help="Nearest neighbor for posture average")
parser.add_argument("-l", "--lambdas",type=float,default=2.5,
help="Extrapolation factor")
parser.add_argument("-g", "--gpu",type=int,default=0,
help="GPU device to use for image generation")
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"]= str(args.gpu)
#args.query = 1599
############################################
# 0. Prepare magnifier object
############################################
generator = Generator(z_dim=cfg.encode_dim,path_model=cfg.vae_weights_path)
############################################
# 1. Load sequences and features
############################################
detections = load_table(cfg.detection_file,asDict=False)
det_cohort= np.array(detections['cohort']) # Used for classifier and plots
det_time = np.array(detections['time']) # Used for classifier and plots
det_frames= np.array(detections['frames'])
det_videos= np.array(detections['videos'])
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Plot ranks of top 100 users in the cyberbullying dataset
Usage: python plot_fig4_top_entities.py
Input data files: ../data/[app_name]_out/complete_user_[app_name].txt, ../data/[app_name]_out/user_[app_name]_all.txt
Time: ~8M
"""
import sys, os, platform
from collections import defaultdict
from datetime import datetime
import numpy as np
from scipy.stats import entropy, kendalltau
import matplotlib as mpl
if platform.system() == 'Linux':
mpl.use('Agg') # no UI backend
import matplotlib.pyplot as plt
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
from utils.helper import Timer, melt_snowflake
from utils.metrics import mean_absolute_percentage_error as mape
cm = plt.cm.get_cmap('RdBu')
def write_to_file(filepath, header, datalist):
with open(filepath, 'w') as fout:
for user_idx, user_id in enumerate(header):
fout.write('{0}\t{1}\t{2}\n'.format(user_id, sum(datalist[user_idx]), ','.join(map(str, datalist[user_idx]))))
def read_from_file(filepath, dtype=0):
datalist = []
with open(filepath, 'r') as fin:
for line in fin:
user_id, total, records = line.rstrip().split('\t')
if dtype == 0:
records = list(map(int, records.split(',')))
else:
records = list(map(float, records.split(',')))
datalist.append(records)
return datalist
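# Round-trip sketch of the on-disk format used by the two helpers above: one line
# per user, 'user_id<TAB>total<TAB>comma,separated,counts' (the ID below is made up).
def _example_roundtrip(tmp_path='./_example_counts.txt'):
    write_to_file(tmp_path, ['12345'], [[3, 0, 2]])
    return read_from_file(tmp_path, dtype=0)   # -> [[3, 0, 2]]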
def main():
timer = Timer()
timer.start()
app_name = 'cyberbullying'
hours_in_day = 24
minutes_in_hour = 60
seconds_in_minute = 60
ms_in_second = 1000
num_bins = 100
width = ms_in_second // num_bins
num_top = 500
fig, axes = plt.subplots(1, 2, figsize=(7.2, 4.8), gridspec_kw={'width_ratios': [2.75, 3]})
axes = axes.ravel()
confusion_sampling_rate = np.load('../data/{0}_out/{0}_confusion_sampling_rate.npy'.format(app_name))
confusion_sampling_rate = np.nan_to_num(confusion_sampling_rate)
load_external_data = True
if not load_external_data:
sample_entity_stats = defaultdict(int)
with open('../data/{0}_out/user_{0}_all.txt'.format(app_name), 'r') as fin:
for line in fin:
split_line = line.rstrip().split(',')
sample_entity_stats[split_line[1]] += 1
# == == == == == == Part 2: Plot entity rank == == == == == == #
print('>>> found top {0} users in sample set...'.format(num_top))
sample_top = [kv[0] for kv in sorted(sample_entity_stats.items(), key=lambda x: x[1], reverse=True)[:num_top]]
# == == == == == == Part 1: Find tweets appearing in complete set == == == == == == #
complete_post_lists_hour = [[0] * hours_in_day for _ in range(num_top)]
complete_post_lists_min = [[0] * minutes_in_hour for _ in range(num_top)]
complete_post_lists_sec = [[0] * seconds_in_minute for _ in range(num_top)]
complete_post_lists_10ms = [[0] * num_bins for _ in range(num_top)]
complete_entity_stats = defaultdict(int)
with open('../data/{0}_out/complete_user_{0}.txt'.format(app_name), 'r') as fin:
for line in fin:
split_line = line.rstrip().split(',')
user_id = split_line[1]
if user_id in sample_top:
complete_entity_stats[user_id] += 1
user_idx = sample_top.index(user_id)
tweet_id = split_line[0]
timestamp_ms = melt_snowflake(tweet_id)[0]
dt_obj = datetime.utcfromtimestamp(timestamp_ms // 1000)
hour = dt_obj.hour
minute = dt_obj.minute
second = dt_obj.second
millisec = timestamp_ms % 1000
ms_idx = (millisec-7) // width if millisec >= 7 else (1000 + millisec-7) // width
complete_post_lists_hour[user_idx][hour] += 1
complete_post_lists_min[user_idx][minute] += 1
complete_post_lists_sec[user_idx][second] += 1
complete_post_lists_10ms[user_idx][ms_idx] += 1
write_to_file('./complete_post_lists_hour.txt', sample_top, complete_post_lists_hour)
write_to_file('./complete_post_lists_min.txt', sample_top, complete_post_lists_min)
write_to_file('./complete_post_lists_sec.txt', sample_top, complete_post_lists_sec)
write_to_file('./complete_post_lists_10ms.txt', sample_top, complete_post_lists_10ms)
print('>>> finish dumping complete lists...')
timer.stop()
# == == == == == == Part 2: Find appearing tweets in sample set == == == == == == #
sample_post_lists_hour = [[0] * hours_in_day for _ in range(num_top)]
sample_post_lists_min = [[0] * minutes_in_hour for _ in range(num_top)]
sample_post_lists_sec = [[0] * seconds_in_minute for _ in range(num_top)]
sample_post_lists_10ms = [[0] * num_bins for _ in range(num_top)]
estimated_post_lists_hour = [[0] * hours_in_day for _ in range(num_top)]
estimated_post_lists_min = [[0] * minutes_in_hour for _ in range(num_top)]
estimated_post_lists_sec = [[0] * seconds_in_minute for _ in range(num_top)]
estimated_post_lists_10ms = [[0] * num_bins for _ in range(num_top)]
hourly_conversion = np.mean(confusion_sampling_rate, axis=(1, 2, 3))
minutey_conversion = np.mean(confusion_sampling_rate, axis=(2, 3))
secondly_conversion = np.mean(confusion_sampling_rate, axis=(3))
with open('../data/{0}_out/user_{0}_all.txt'.format(app_name), 'r') as fin:
for line in fin:
split_line = line.rstrip().split(',')
user_id = split_line[1]
if user_id in sample_top:
user_idx = sample_top.index(user_id)
tweet_id = split_line[0]
timestamp_ms = melt_snowflake(tweet_id)[0]
dt_obj = datetime.utcfromtimestamp(timestamp_ms // 1000)
hour = dt_obj.hour
minute = dt_obj.minute
second = dt_obj.second
millisec = timestamp_ms % 1000
ms_idx = (millisec-7) // width if millisec >= 7 else (1000 + millisec-7) // width
sample_post_lists_hour[user_idx][hour] += 1
sample_post_lists_min[user_idx][minute] += 1
sample_post_lists_sec[user_idx][second] += 1
sample_post_lists_10ms[user_idx][ms_idx] += 1
estimated_post_lists_hour[user_idx][hour] += 1 / hourly_conversion[hour]
estimated_post_lists_min[user_idx][minute] += 1 / minutey_conversion[hour, minute]
estimated_post_lists_sec[user_idx][second] += 1 / secondly_conversion[hour, minute, second]
estimated_post_lists_10ms[user_idx][ms_idx] += 1 / confusion_sampling_rate[hour, minute, second, ms_idx]
write_to_file('./sample_post_lists_hour.txt', sample_top, sample_post_lists_hour)
write_to_file('./sample_post_lists_min.txt', sample_top, sample_post_lists_min)
write_to_file('./sample_post_lists_sec.txt', sample_top, sample_post_lists_sec)
write_to_file('./sample_post_lists_10ms.txt', sample_top, sample_post_lists_10ms)
write_to_file('./estimated_post_lists_hour.txt', sample_top, estimated_post_lists_hour)
write_to_file('./estimated_post_lists_min.txt', sample_top, estimated_post_lists_min)
write_to_file('./estimated_post_lists_sec.txt', sample_top, estimated_post_lists_sec)
write_to_file('./estimated_post_lists_10ms.txt', sample_top, estimated_post_lists_10ms)
print('>>> finish dumping sample and estimated lists...')
timer.stop()
else:
sample_top = []
complete_post_lists_hour = []
with open('./complete_post_lists_hour.txt', 'r') as fin:
for line in fin:
user_id, total, records = line.rstrip().split('\t')
sample_top.append(user_id)
records = list(map(int, records.split(',')))
complete_post_lists_hour.append(records)
sample_post_lists_hour = read_from_file('./sample_post_lists_hour.txt', dtype=0)
sample_post_lists_min = read_from_file('./sample_post_lists_min.txt', dtype=0)
sample_post_lists_sec = read_from_file('./sample_post_lists_sec.txt', dtype=0)
sample_post_lists_10ms = read_from_file('./sample_post_lists_10ms.txt', dtype=0)
estimated_post_lists_hour = read_from_file('./estimated_post_lists_hour.txt', dtype=1)
estimated_post_lists_min = read_from_file('./estimated_post_lists_min.txt', dtype=1)
estimated_post_lists_sec = read_from_file('./estimated_post_lists_sec.txt', dtype=1)
estimated_post_lists_10ms = read_from_file('./estimated_post_lists_10ms.txt', dtype=1)
# == == == == == == Part 3: Find the best estimation by comparing JS distance == == == == == == #
ret = {}
num_estimate_list = []
num_sample_list = []
num_complete_list = []
sample_entity_stats = {user_id: sum(sample_post_lists_hour[user_idx]) for user_idx, user_id in enumerate(sample_top)}
complete_entity_stats = {user_id: sum(complete_post_lists_hour[user_idx]) for user_idx, user_id in enumerate(sample_top)}
min_mat = np.array([], dtype=np.int64).reshape(0, 60)
sec_mat = np.array([], dtype=np.int64).reshape(0, 60)
for user_idx, user_id in enumerate(sample_top):
num_sample = sample_entity_stats[user_id]
num_complete = complete_entity_stats[user_id]
hour_entropy = entropy(sample_post_lists_hour[user_idx], base=hours_in_day)
min_entropy = entropy(sample_post_lists_min[user_idx], base=minutes_in_hour)
sec_entropy = entropy(sample_post_lists_sec[user_idx], base=seconds_in_minute)
ms10_entropy = entropy(sample_post_lists_10ms[user_idx], base=num_bins)
min_mat = np.vstack((min_mat, np.array(sample_post_lists_min[user_idx]).reshape(1, -1)))
sec_mat = np.vstack((sec_mat, np.array(sample_post_lists_sec[user_idx]).reshape(1, -1)))
"""
Code to cut SMPL into near symmetric parts.
Author: Bharat
Cite: Combining Implicit Function Learning and Parametric Models for 3D Human Reconstruction, ECCV 2020.
"""
import numpy as np
from psbody.mesh import Mesh
import sys
sys.path.append('..')
import pickle as pkl
from lib.smplx.body_models import SMPLX
def get_tpose_smplx():
# sp = SmplPaths(gender='neutral')
# smplx = sp.get_smpl()
# smplx.trans[:] = 0
# smplx.pose[:] = 0
smplx_output = SMPLX(model_path="/home/chen/SMPLX/models/smplx", batch_size=1, gender='neutral')()
return smplx_output
def cut_right_forearm(display=False):
smplx = get_tpose_smplx()
verts = smplx.vertices.detach().cpu().numpy().squeeze()
faces = smplx.faces
ms = Mesh(v=verts, f=faces)
col = np.zeros(verts.shape[0])
col[verts[:, 0] < -0.6] = 1 # right hand
if display:
ms.set_vertex_colors_from_weights(col)
ms.show()
print('right_forearm ', np.where(col)[0].shape)
return col
def cut_left_forearm(display=False):
smplx = get_tpose_smplx()
verts = smplx.vertices.detach().cpu().numpy().squeeze()
faces = smplx.faces
ms = Mesh(v=verts, f=faces)
col = np.zeros(verts.shape[0])
col[verts[:, 0] > 0.6] = 1 # left hand
if display:
ms.set_vertex_colors_from_weights(col)
ms.show()
print('left_forearm ', np.where(col)[0].shape)
return col
def cut_right_midarm(display=False):
smplx = get_tpose_smplx()
verts = smplx.vertices.detach().cpu().numpy().squeeze()
faces = smplx.faces
ms = Mesh(v=verts, f=faces)
col = np.zeros(verts.shape[0])
col[(verts[:, 0] >= -0.6) & (verts[:, 0] < -0.4)] = 1
if display:
ms.set_vertex_colors_from_weights(col)
ms.show()
print('right_midarm ', np.where(col)[0].shape)
return col
def cut_right_upperarm(display=False):
smplx = get_tpose_smplx()
verts = smplx.vertices.detach().cpu().numpy().squeeze()
faces = smplx.faces
ms = Mesh(v=verts, f=faces)
col = np.zeros(verts.shape[0])
col[(verts[:, 0] >= -0.4) & (verts[:, 0] < -0.2)] = 1
if display:
ms.set_vertex_colors_from_weights(col)
ms.show()
print('right_upperarm ', np.where(col)[0].shape)
return col
def cut_left_midarm(display=False):
smplx = get_tpose_smplx()
verts = smplx.vertices.detach().cpu().numpy().squeeze()
faces = smplx.faces
ms = Mesh(v=verts, f=faces)
col = np.zeros(verts.shape[0])
col[(verts[:, 0] <= 0.6) & (verts[:, 0] > 0.4)] = 1
if display:
ms.set_vertex_colors_from_weights(col)
ms.show()
print('left_midarm ', np.where(col)[0].shape)
return col
def cut_left_upperarm(display=False):
smplx = get_tpose_smplx()
verts = smplx.vertices.detach().cpu().numpy().squeeze()
faces = smplx.faces
ms = Mesh(v=verts, f=faces)
col = np.zeros(verts.shape[0])
col[(verts[:, 0] <= 0.4) & (verts[:, 0] > 0.2)] = 1
if display:
ms.set_vertex_colors_from_weights(col)
ms.show()
print('left_upperarm ', np.where(col)[0].shape)
return col
def cut_head(display=False):
smplx = get_tpose_smplx()
verts = smplx.vertices.detach().cpu().numpy().squeeze()
faces = smplx.faces
ms = Mesh(v=verts, f=faces)
col = np.zeros(verts.shape[0])
col[verts[:, 1] > 0.16] = 1
if display:
ms.set_vertex_colors_from_weights(col)
ms.show()
print('head ', np.where(col)[0].shape)
return col
def cut_upper_right_leg(display=False):
smplx = get_tpose_smplx()
verts = smplx.vertices.detach().cpu().numpy().squeeze()
faces = smplx.faces
ms = Mesh(v=verts, f=faces)
col = np.zeros(verts.shape[0])
col[(verts[:, 1] < -0.44) & (verts[:, 0] < 0) & (verts[:, 1] >= -0.84)] = 1
if display:
ms.set_vertex_colors_from_weights(col)
ms.show()
print('upper_right_leg ', np.where(col)[0].shape)
return col
def cut_right_leg(display=False):
smplx = get_tpose_smplx()
verts = smplx.vertices.detach().cpu().numpy().squeeze()
faces = smplx.faces
ms = Mesh(v=verts, f=faces)
col = np.zeros(verts.shape[0])
col[(verts[:, 1] < -0.84) & (verts[:, 0] < 0) & (verts[:, 1] > -1.14)] = 1
if display:
ms.set_vertex_colors_from_weights(col)
ms.show()
print('right_leg ', | np.where(col) | numpy.where |
import numpy as np
def movingaverage(interval, window_size=14, pad=False):
window = np.ones(int(window_size))/float(window_size)
ma= np.convolve(interval, window, 'same')
# pad the end properly
if pad:
w = window_size
x = np.array(interval)
n = len(ma)
start = n-w
for i in range(start, start+w):
seq=x[i-w:i]
ma[i]=seq.sum()/len(seq)
return ma
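# Illustrative usage sketch (editor addition, not from the original snippet):
# applies movingaverage() to a synthetic random-walk series. The demo data and
# the helper name _demo_movingaverage are assumptions made only for this example.
def _demo_movingaverage():
    series = np.cumsum(np.random.normal(0, 1, 200))  # synthetic price-like series
    smoothed = movingaverage(series, window_size=14, pad=True)
    return series, smoothed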
def gentrends(x, window=1/3.0, charts=True):
"""
    Returns a Pandas DataFrame with support and resistance lines, together with
    the slopes of the max and min trendlines.
:param x: One-dimensional data set
:param window: How long the trendlines should be. If window < 1, then it
will be taken as a percentage of the size of the data
:param charts: Boolean value saying whether to print chart to screen
"""
import numpy as np
    import pandas as pd
x = np.array(x)
if window < 1:
window = int(window * len(x))
max1 = np.where(x == max(x))[0][0] # find the index of the abs max
min1 = np.where(x == min(x))[0][0] # find the index of the abs min
# First the max
if max1 + window > len(x):
max2 = max(x[0:(max1 - window)])
else:
max2 = max(x[(max1 + window):])
# Now the min
if min1 - window < 0:
min2 = min(x[(min1 + window):])
else:
min2 = min(x[0:(min1 - window)])
# Now find the indices of the secondary extrema
max2 = np.where(x == max2)[0][0] # find the index of the 2nd max
min2 = np.where(x == min2)[0][0] # find the index of the 2nd min
# Create & extend the lines
maxslope = (x[max1] - x[max2]) / (max1 - max2) # slope between max points
minslope = (x[min1] - x[min2]) / (min1 - min2) # slope between min points
a_max = x[max1] - (maxslope * max1) # y-intercept for max trendline
a_min = x[min1] - (minslope * min1) # y-intercept for min trendline
b_max = x[max1] + (maxslope * (len(x) - max1)) # extend to last data pt
b_min = x[min1] + (minslope * (len(x) - min1)) # extend to last data point
maxline = np.linspace(a_max, b_max, len(x)) # Y values between max's
minline = np.linspace(a_min, b_min, len(x)) # Y values between min's
# OUTPUT
trends = np.transpose(np.array((x, maxline, minline)))
trends = pd.DataFrame(trends, index=np.arange(0, len(x)),
columns=['Data', 'Max Line', 'Min Line'])
if charts is True:
from matplotlib.pyplot import plot, grid, show
plot(trends)
grid()
show()
return trends, maxslope, minslope
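# Illustrative usage sketch (editor addition): calls gentrends() on a synthetic
# close series with charting disabled so no plot window opens. The demo data and
# the helper name _demo_gentrends are assumptions made only for this example.
def _demo_gentrends():
    closes = 100 + np.cumsum(np.random.normal(0, 1, 120))  # synthetic closing prices
    trends, maxslope, minslope = gentrends(closes, window=1/3.0, charts=False)
    return trends, maxslope, minslope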
def segtrends(x, segments=2, charts=True, momentum=False):
"""
    Turn minitrends into an iterative process more easily adaptable to
    implementation in simple trading systems; allows backtesting functionality.
    :param x: One-dimensional data set
    :param segments: Number of trendline segments to divide the data into
    :param charts: Boolean value saying whether to print chart to screen
"""
import numpy as np
n = len(x)
y = np.array(x)
movy = movingaverage(y, 7)
# Implement trendlines
# Find the indexes of these maxima in the data
segments = int(segments)
maxima = np.ones(segments)
minima = | np.ones(segments) | numpy.ones |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 11 16:22:17 2021
@author: mike_ubuntu
"""
import numpy as np
import warnings
#from abc import ABC, abstractmethod, abstractproperty
from functools import reduce
import copy
import gc
import inspect
import time
from sklearn.linear_model import LinearRegression
import src.globals as global_var
from src.token_family import TF_Pool
from src.factor import Factor
from src.supplementary import Filter_powers, Population_Sort, form_label#, memory_assesment
import src.moeadd.moeadd_stc as moeadd
#import src.moeadd.moeadd_supplementary as moeadd_sup
def Check_Unqueness(obj, background):
return not any([elem == obj for elem in background])
def normalize_ts(Input):
# print('normalize_ts Input:', Input)
matrix = np.copy(Input)
# print(Matrix.shape)
    if np.ndim(matrix) == 0:
        raise ValueError('Incorrect input to the normalization: the data has 0 dimensions')
    elif np.ndim(matrix) == 1:
        return matrix
else:
for i in np.arange(matrix.shape[0]):
std = np.std(matrix[i])
if std != 0:
matrix[i] = (matrix[i] - np.mean(matrix[i])) / std
else:
matrix[i] = 1
return matrix
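# Illustrative sketch (editor addition, not part of the original module): shows what
# normalize_ts() does on a toy 2-D array: each row is standardised to zero mean and
# unit variance, and constant rows are mapped to ones. The toy values are assumptions
# made only for this example.
def _normalize_ts_example():
    toy = np.array([[1., 2., 3.],
                    [5., 5., 5.]])  # second row is constant -> becomes all ones
    return normalize_ts(toy)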
class Complex_Structure(object):
def __init__(self, interelement_operator = np.add, *params):
self.structure = None
self.interelement_operator = interelement_operator
def __eq__(self, other):
if type(other) != type(self):
raise ValueError('Type of self and other are different')
return (all([any([other_elem == self_elem for other_elem in other.structure]) for self_elem in self.structure]) and
all([any([other_elem == self_elem for self_elem in self.structure]) for other_elem in other.structure]) and
len(other.structure) == len(self.structure))
def set_evaluator(self, evaluator):
raise NotImplementedError
# self._eval_obj = evaluator
def evaluate(self, structural = False):
assert len(self.structure) > 0, 'Attempt to evaluate an empty complex structure'
if len(self.structure) == 1:
return self.structure[0].evaluate(structural)
else:
# print([type(elem) for elem in self.structure])
return reduce(lambda x, y: self.interelement_operator(x, y.evaluate(structural)),
self.structure[1:], self.structure[0].evaluate(structural))
def reset_saved_state(self):
self.saved = {True:False, False:False}
self.saved_as = {True:None, False:None}
for elem in self.structure:
elem.reset_saved_state()
@property
def name(self):
pass
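# Illustrative sketch (editor addition): a minimal concrete subclass showing how
# Complex_Structure.evaluate() folds the per-element evaluations together with
# interelement_operator. The stub element and class names are assumptions made
# purely to illustrate the pattern; the real subclass used below is Term.
class _ConstElement(object):
    """Stub element with the minimal interface evaluate()/reset_saved_state()."""
    def __init__(self, value):
        self._value = value
    def evaluate(self, structural=False):
        return np.array([self._value], dtype=float)
    def reset_saved_state(self):
        pass
class _SumStructure(Complex_Structure):
    """Sums its elements, e.g. _SumStructure([1., 2., 3.]).evaluate() -> array([6.])."""
    def __init__(self, values):
        super().__init__(np.add)
        self.structure = [_ConstElement(v) for v in values]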
class Term(Complex_Structure):
def __init__(self, pool,
passed_term = None, max_factors_in_term = 1, forbidden_tokens = None, interelement_operator = np.multiply):
super().__init__(interelement_operator)
self.pool = pool
self.max_factors_in_term = max_factors_in_term
if type(passed_term) == type(None):
self.Randomize(forbidden_tokens)
else:
self.Defined(passed_term)
if type(global_var.tensor_cache) != type(None):
self.use_cache()
# if type(global_var.grid_cache) != type(None):
# self.use_grid_cache()
self.reset_saved_state() # key - state of normalization, value - if the variable is saved in cache
@property
def cache_label(self):
if len(self.structure) > 1:
structure_sorted = sorted(self.structure, key = lambda x: x.cache_label)
cache_label = tuple([elem.cache_label for elem in structure_sorted])#reduce(form_label, structure_sorted, '')
else:
cache_label = self.structure[0].cache_label
return cache_label
def use_cache(self):
self.cache_linked = True
for idx, _ in enumerate(self.structure):
if not self.structure[idx].cache_linked:
self.structure[idx].use_cache()
#
# def use_grids_cache(self): #!
# self.grid_cache_linked = True
# for idx, _ in enumerate(self.structure):
# if not self.structure[idx].cache_linked:
# self.structure[idx].use_cache()
def Defined(self, passed_term):
self.structure = []
if type(passed_term) == list or type(passed_term) == tuple:
for i, factor in enumerate(passed_term):
if type(factor) == str:
# token_family = [token_family for token_family in self.tokens if factor in token_family.tokens][0]
# self.structure.append(Factor(factor, token_family, randomize = True))
self.structure.append(self.pool.create(label = factor)); raise NotImplementedError
elif type(factor) == Factor:
self.structure.append(factor)
else:
raise ValueError('The structure of a term should be declared with str or src.factor.Factor obj, instead got', type(factor))
        else:  # Case where only a single token is passed
if type(passed_term) == str:
# token_family = [token_family for token_family in self.tokens if passed_term in token_family.tokens][0]
# self.structure.append(Factor(passed_term, token_family, randomize = True))
self.structure.append(self.pool.create(label = passed_term)); raise NotImplementedError
elif type(passed_term) == Factor:
self.structure.append(passed_term)
else:
raise ValueError('The structure of a term should be declared with str or src.factor.Factor obj, instead got', type(passed_term))
def Randomize(self, forbidden_factors = None, **kwargs):
if np.sum(self.pool.families_cardinality(meaningful_only = True)) == 0:
raise ValueError('No token families are declared as meaningful for the process of the system search')
factors_num = np.random.randint(1, self.max_factors_in_term +1)
# print('factors:', factors_num)
while True:
self.occupied_tokens_labels = []
occupied_by_factor, factor = self.pool.create(label = None, create_meaningful = True,
occupied = self.occupied_tokens_labels, **kwargs)
self.structure = [factor,]
self.occupied_tokens_labels.extend(occupied_by_factor)
for i in np.arange(1, factors_num):
occupied_by_factor, factor = self.pool.create(label = None, create_meaningful = False,
occupied = self.occupied_tokens_labels,
def_term_tokens = [token.label for token in self.structure],
**kwargs)
self.structure.append(factor)
self.occupied_tokens_labels.extend(occupied_by_factor)
self.structure = Filter_powers(self.structure)
if type(forbidden_factors) == type(None):
# print('term length ff', len(self.structure))
break
elif all([(Check_Unqueness(factor, forbidden_factors) or not factor.status['unique_for_right_part']) for factor in self.structure]):
# print('term length compl cond', len(self.structure), forbidden_factors)
break
def evaluate(self, structural): # , normalize = True
assert type(global_var.tensor_cache) != type(None), 'Currently working only with connected cache'
# print('Normalized state of current equaton: ', normalize)
# print('normalized evaluation', normalize, [elem for idx, elem in enumerate(inspect.stack()[:-18])], len(inspect.stack()))
# print('\n')
normalize = structural #
if self.saved[structural]:
value = global_var.tensor_cache.get(self.cache_label, normalized = normalize,
saved_as = self.saved_as[normalize])
value = value.reshape(value.size)
return value
else:
self.prev_normalized = normalize
value = super().evaluate(structural)
# print(self.name, ' - ')
# print(value.shape)
if normalize and np.ndim(value) != 1:
value = normalize_ts(value)
elif normalize and np.ndim(value) == 1 and np.std(value) != 0:
value = (value - np.mean(value))/ | np.std(value) | numpy.std |