import numpy as np
from precomputeFx import *
all_nodes_180 = np.load('../../latticeFiles/all_nodes_latt180.npy',allow_pickle=True)
# precompute fx for all possible lattice locations for a given fault;
## fault_name: string for file save prefix to indicate fault
## fault: array with first element fault strike and second element fault dip
## arr_of_nodes: either all_nodes_180 or all_nodes_360
## num_nodes: typically len(arr_of_nodes)
test_fx_ = precompute_fx('test_fault_1', np.array([0.0, np.pi/2]), all_nodes_180, len(all_nodes_180))
# coding: utf-8
"""
Created on 16/05/2019
@author: baptiste
"""
from ho_homog import periodicity
import numpy as np
import logging
from pytest import approx
logger = logging.getLogger("Test_periodicity")
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(
"%(asctime)s :: %(levelname)s :: %(name)s :: %(message)s", "%H:%M:%S"
)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.DEBUG)
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
def test_pbc_from_vectors():
per_vect = np.array([[4.0, 0.0], [0.0, 8.0]])
pbc = periodicity.PeriodicDomain.pbc_dual_base(per_vect, "XY")
test_points = [
(0.0, 0.0),
(4.0, 0.0),
(4.0, 8.0),
(0.0, 8.0),
(2.0, 0.0),
(4.0, 4.0),
(2.0, 8.0),
(0.0, 4.0),
]
test_points = [np.array(coord) for coord in test_points]
inside_results = [
True,
False,
False,
False,
True,
False,
False,
True,
]
map_results = [
(39996, 79992),
(0.0, 0.0),
(0.0, 0.0),
(0.0, 0.0),
(39996, 79992),
(0.0, 4.0),
(2.0, 0.0),
(39996, 79992),
]
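    # (39996, 79992) is 9999 times the period vectors (4, 8); presumably a "far away"
    # dummy target for points that the periodic mapping is not supposed to touch.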
    map_results = [np.array(coord) for coord in map_results]
import os
from options.test_options import TestOptions
from models import create_model
from util.util import tensor2labelim, tensor2confidencemap
from models.sne_model import SNE
import torchvision.transforms as transforms
import torch
import numpy as np
import cv2
import copy
import tqdm
import glob
class dataset():
def __init__(self):
self.num_labels = 2
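# read_calib_file is used below but is not defined in this excerpt. A minimal sketch of such a
# KITTI-style calibration parser could look as follows (hypothetical helper, shown only for
# illustration; the real helper is assumed to live elsewhere in this project):
def read_calib_file(filepath):
    """Parse lines of the form 'key: v1 v2 ...' into a dict of float arrays."""
    data = {}
    with open(filepath, 'r') as f:
        for line in f:
            if ':' not in line:
                continue
            key, value = line.split(':', 1)
            try:
                data[key.strip()] = np.array([float(x) for x in value.split()])
            except ValueError:
                # skip entries whose values are not purely numeric
                pass
    return data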
def load_calib(filepath):
rawdata = read_calib_file(filepath)
    K = np.reshape(rawdata['cam_K'], (3, 3))
# Copyright (C) 2019-2021 Ruhr West University of Applied Sciences, Bottrop, Germany
# AND Elektronische Fahrwerksysteme GmbH, Gaimersheim Germany
#
# This Source Code Form is subject to the terms of the Apache License 2.0
# If a copy of the APL2 was not distributed with this
# file, You can obtain one at https://www.apache.org/licenses/LICENSE-2.0.txt.
import numpy as np
from netcal import AbstractCalibration, dimensions, accepts
from .NearIsotonicRegression import NearIsotonicRegression
class ENIR(AbstractCalibration):
"""
Ensemble of Near Isotonic Regression (ENIR) models [1]_. These models allow - in contrast to standard
:class:`IsotonicRegression` method - a violation of the monotony restrictions. Using the *modified
Pool-Adjacent-Violators Algorithm (mPAVA)*, this method build multiple Near Isotonic Regression models
and weights them by a certain score function.
Let :math:`\\mathcal{D} = \\{(x_0, y_0), (x_1, y_1), ... \\}` denote
a data set with input data :math:`x` and ground truth labels :math:`y \\in \\{0, 1\\}` of length :math:`N`.
Let :math:`M` denote a model with estimated parameters :math:`\\hat{\\theta}` of length :math:`K`
and let :math:`\\hat{p}` denote the confidence estimates
on :math:`\\mathcal{D}` of model :math:`M` by parameters :math:`\\hat{\\theta}`.
    The score function might either be the *Akaike Information
Criterion (AIC)* given by
.. math::
AIC = -2 L ( \\hat{\\theta} ) + 2K
or the *Bayesian Information Criterion* given by
.. math::
        BIC = -2 L ( \\hat{\\theta} ) + \\log(N)K
with :math:`L (\\hat{ \\theta })` as the log-likelihood given by
.. math::
L (\\hat{ \\theta }) = \\sum_{i=1}^N y^{(i)} \\log(\\hat{p}^{(i)}_\\hat{\\theta}) +
(1-y^{(i)}) \\log(1 - \\hat{p}^{(i)}_\\hat{\\theta}) .
These scores can be used to calculate a model posterior given by
.. math::
p(M | \\mathcal{D}) \\propto p( \\mathcal{D} | M )p(M) \\approx \\exp( -BIC/2 )p(M) .
Using the elbow method to sort out models with a low relative score, the weights for each model can be obtained
by normalizing over all model posterior scores.
Parameters
----------
score_function: str, default='BIC'
define score functions:
- 'BIC': Bayesian-Information-Criterion
- 'AIC': Akaike-Information-Criterion
quick_init : bool, default=True
Allow quick initialization of NIR (equal consecutive values are grouped directly).
detection : bool, default: False
If False, the input array 'X' is treated as multi-class confidence input (softmax)
with shape (n_samples, [n_classes]).
If True, the input array 'X' is treated as a box predictions with several box features (at least
box confidence must be present) with shape (n_samples, [n_box_features]).
independent_probabilities : bool, optional, default: False
Boolean for multi class probabilities.
If set to True, the probability estimates for each
class are treated as independent of each other (sigmoid).
References
----------
.. [1] Naeini, <NAME>, and <NAME>:
"Binary classifier calibration using an ensemble of near isotonic regression models."
2016 IEEE 16th International Conference on Data Mining (ICDM). IEEE, 2016.
`Get source online <https://ieeexplore.ieee.org/iel7/7837023/7837813/07837860.pdf>`_
"""
@accepts(str, bool, bool, bool)
def __init__(self, score_function: str = 'BIC', quick_init: bool = True,
detection: bool = False, independent_probabilities: bool = False):
"""
Constructor.
Parameters
----------
score_function: str, default='BIC'
define score functions:
- 'BIC': Bayesian-Information-Criterion
- 'AIC': Akaike-Information-Criterion
quick_init : bool, default=True
Allow quick initialization of NIR (equal consecutive values are grouped directly).
detection : bool, default: False
If False, the input array 'X' is treated as multi-class confidence input (softmax)
with shape (n_samples, [n_classes]).
If True, the input array 'X' is treated as a box predictions with several box features (at least
box confidence must be present) with shape (n_samples, [n_box_features]).
independent_probabilities : bool, optional, default: False
Boolean for multi class probabilities.
If set to True, the probability estimates for each
class are treated as independent of each other (sigmoid).
"""
super().__init__(detection=detection, independent_probabilities=independent_probabilities)
# for multi class calibration with K classes, K binary calibration models are needed
self._multiclass_instances = []
# list of all binning models with [<NearIsotonicRegression>, ...]
self._binning_models = []
self._model_scores = []
if type(score_function) != str:
raise AttributeError("Score function must be string.")
if score_function.lower() not in ['aic', 'bic']:
raise AttributeError("Unknown score function \'%s\'" % score_function)
self.score_function = score_function.lower()
self.quick_init = quick_init
def clear(self):
"""
Clear model parameters.
"""
super().clear()
# for multi class calibration with K classes, K binary calibration models are needed
for instance in self._multiclass_instances:
del instance
self._multiclass_instances.clear()
# list of all binning models with [<NearIsotonicRegression>, ...]
for model in self._binning_models:
del model
self._binning_models.clear()
self._model_scores = None
@accepts(bool)
def get_params(self, deep=True):
"""
Get parameters for this estimator.
Parameters
----------
deep : boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
# get all params of current instance and save as dict
params = super().get_params(deep=deep)
if deep:
# save binning models as well - this is not captured by super class method
params['_binning_models'] = []
for model in self._binning_models:
params['_binning_models'].append(model.get_params(deep=deep))
return params
def set_params(self, **params) -> 'ENIR':
"""
Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The latter have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Returns
-------
self
"""
if '_binning_models' in params:
self._binning_models = []
for model in params['_binning_models']:
instance = NearIsotonicRegression()
instance.set_params(**model)
self._binning_models.append(instance)
# remove key and value from dict to prevent override in super method
del params['_binning_models']
# invoke super method
super().set_params(**params)
return self
@dimensions((1, 2), (1, 2))
def fit(self, X: np.ndarray, y: np.ndarray) -> 'ENIR':
"""
Build ENIR calibration model.
Parameters
----------
X : np.ndarray, shape=(n_samples, [n_classes]) or (n_samples, [n_box_features])
NumPy array with confidence values for each prediction on classification with shapes
1-D for binary classification, 2-D for multi class (softmax).
On detection, this array must have 2 dimensions with number of additional box features in last dim.
y : np.ndarray, shape=(n_samples, [n_classes])
NumPy array with ground truth labels.
Either as label vector (1-D) or as one-hot encoded ground truth array (2-D).
Returns
-------
ENIR
Instance of class :class:`ENIR`.
"""
# detection mode is not supported natively
if self.detection:
print("WARNING: Detection mode is not supported natively by ENIR method. "
"This will discard all additional box information and only keep confidence scores.")
# if 2d, keep only confidence scores and preserve 2d structure
if len(X.shape) == 2:
X = np.expand_dims(X[:, 0], axis=1)
X, y = super().fit(X, y)
# multiclass case: create K sub models for each label occurrence
if not self._is_binary_classification():
# create multiple one vs all models
self._multiclass_instances = self._create_one_vs_all_models(X, y, ENIR, self.score_function,
self.quick_init)
return self
# binary classification problem but got two entries? (probability for 0 and 1 separately)?
# we only need probability p for Y=1 (probability for 0 is (1-p) )
if len(X.shape) == 2:
X = np.array(X[:, 1])
else:
X = np.array(X)
X, y = self._sort_arrays(X, y)
# log action
print("Get path of all Near Isotonic Regression models with mPAVA ...")
iso = NearIsotonicRegression(quick_init=self.quick_init,
independent_probabilities=self.independent_probabilities)
iso.fit(X, y)
model_list = [iso]
while iso is not None:
iso = iso.get_next_model()
model_list.append(iso)
# first element is perfect fit to training data - discard due to overfitting
model_list.pop(0)
# last element is always None - indicator of mPAVA termination
model_list.pop()
# get model scores and binning models by elbow method
self._model_scores, self._binning_models = self._elbow(X, y, model_list, self.score_function, alpha=0.001)
return self
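    # Minimal usage sketch (hypothetical 1-D binary-classification data):
    #   enir = ENIR(score_function='BIC')
    #   enir.fit(confidences, labels)              # confidences, labels: shape (n_samples,)
    #   calibrated = enir.transform(confidences)   # calibrated confidences, same shape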
@dimensions((1, 2))
def transform(self, X: np.ndarray) -> np.ndarray:
"""
After model calibration, this function is used to get calibrated outputs of uncalibrated
confidence estimates.
Parameters
----------
X : np.ndarray, shape=(n_samples, [n_classes])
NumPy array with uncalibrated confidence estimates.
1-D for binary classification, 2-D for multi class (softmax).
Returns
-------
np.ndarray, shape=(n_samples, [n_classes])
NumPy array with calibrated confidence estimates.
1-D for binary classification, 2-D for multi class (softmax).
"""
# detection mode is not supported natively
if self.detection:
# if 2d, keep only confidence scores and preserve 2d structure
if len(X.shape) == 2:
X = np.expand_dims(X[:, 0], axis=1)
X = super().transform(X)
# prepare return value vector
        calibrated = np.zeros(X.shape)
# coding: utf-8
import numpy as np
import matplotlib.pyplot as plt
plt.style.use(['transitions.mplstyle'])
import matplotlib
colors = matplotlib.rcParams['axes.prop_cycle'].by_key()['color']
black = matplotlib.rcParams['text.color']
width = matplotlib.rcParams['axes.linewidth']
from matplotlib import colors as mplcolors
import sys
sys.path.append('lib/')
import plotting
import evolimmune
def func(pienv, s):
return np.clip((pienv*(2.0-s) + s-1.0)/s, 0, 1)
s = np.linspace(0, 1)
fig, axes = plt.subplots(figsize=(6.6, 2.7), ncols=2)
ax = axes[0]
cmap = mplcolors.LinearSegmentedColormap.from_list('mycmap', [colors[0], colors[1]])
CS = ax.contour(func(s[:,None], s[None,:]), extent=(0, 1, 0, 1), cmap=cmap, levels=np.arange(0.2, 1.0, 0.2))
""" This module is used to generate a 3d mesh based on a 2d section in
the xy-plane that is revolved around the x-axis. Note that only
quadratic elements are supported. For linear elements, Abaqus' builtin
routine works reasonably well (although the node coordinate accuracy
seem a bit low), see
:py:func:`~rollover.three_d.wheel.substructure.generate_3d_mesh`
"""
from __future__ import print_function
import numpy as np
from rollover.utils import naming_mod as names
def generate(wheel_model, mesh_size):
""" Based on a meshed 2d-profile of a wheel, generate a 3d-revolved
mesh with angular spacing such that the elements on the outer radius
have a circumferential size of mesh_size.
:param wheel_model: A model that contains a wheel part with a 2d
section mesh
:type wheel_model: Model object (Abaqus)
:param mesh_size: The mesh size to decide the angular increments
:type mesh_size: float
:returns: The wheel part and the angles for the element end planes
:type: tuple( Part object(Abaqus), np.array )
"""
wheel_part = wheel_model.parts[names.wheel_part]
# 1) Extract the 2d mesh
mesh_2d = get_2d_mesh(wheel_part)
# 2) Create the 3d-mesh
mesh_3d = make_3d_mesh_quad(mesh_2d, mesh_size)
# 3) Save the 3d-mesh to a part definition in an abaqus input file
input_file = save_3d_mesh_to_inp(mesh_3d)
# 4) Import the mesh. Delete the old part, and import the 3d mesh
del wheel_model.parts[names.wheel_part]
wheel_model.PartFromInputFile(inputFileName=input_file)
wheel_part = wheel_model.parts[names.wheel_part]
return wheel_part, mesh_3d['angles']
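# Usage sketch (inside an Abaqus script; 'wheel_model' and the mesh size value are placeholders):
#   wheel_part, angles = generate(wheel_model, mesh_size=5.0)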
def get_2d_mesh(wheel_part):
""" Based on the wheel part, determine the 2d mesh information
:param wheel_part: The wheel part containing the 2d mesh
:type wheel_part: Part object (Abaqus)
:returns: Mesh specification with the following fields:
- nodes: np.array with node coordinates
- elements: dictionary with keys according to number of
nodes in element: N3,N4,N6,N8. Each item contains a list
of list of node labels
- edge_nodes: list of labels of nodes that belong to the
edges of the elements (and not the corners)
- corner_nodes: list of labels of nodes that belong to the
corners of the elements.
:rtype: dict
"""
node_coords = np.array([n.coordinates for n in wheel_part.nodes])
elements = {'N3': [], 'N4': [], 'N6': [], 'N8': []}
edge_nodes = []
corner_nodes = []
for e in wheel_part.elements:
enods = e.connectivity
num_enods = len(enods)
key = 'N' + str(num_enods)
if key in elements:
elements[key].append(enods)
else:
            raise ValueError('Unknown element type with '
                             + str(num_enods) + ' nodes.\n'
                             + '- Element label: ' + str(e.label) + '\n'
                             + '- Element nodes: ' + str(enods) + '\n'
                             + '- Element type : ' + str(e.type) + '\n')
if num_enods > 4: # 2nd order, second half of nodes on edges
            for n in enods[:num_enods // 2]:
if n not in corner_nodes:
corner_nodes.append(n)
            for n in enods[num_enods // 2:]:
if n not in edge_nodes:
edge_nodes.append(n)
else: # 1st order elements, all nodes at corners
for n in enods:
if n not in corner_nodes:
corner_nodes.append(n)
the_mesh = {'nodes': node_coords, 'elements': elements,
'edge_nodes': edge_nodes, 'corner_nodes': corner_nodes}
return the_mesh
def make_3d_mesh_quad(mesh_2d, mesh_size):
""" Revolve a 2d-mesh into a 3d-mesh
:param mesh_2d: Mesh specification with the following fields:
- nodes: np.array with node coordinates
- elements: dictionary with keys according to number
of nodes in element: N3,N4,N6,N8.
Each item contains a list of list of node labels
- edge_nodes: list of labels of nodes that belong to
the edges of the elements (and not the corners)
- corner_nodes: list of labels of nodes that belong
to the corners of the elements.
:type mesh_2d: dict
:param mesh_size: The circumferential mesh size at largest radius
:type mesh_size: float
:returns: Mesh specification with the following fields:
- nodes: np.array with node coordinates
- elements: dictionary with keys according to number
of nodes in element: N15, N20. Each item contains a list
of list of node labels
- angles: np.array of angles for angular increments of
elements.
:rtype: dict
"""
nodes_2d = mesh_2d['nodes']
elems_2d = mesh_2d['elements']
edge_node_num_2d = mesh_2d['edge_nodes']
corner_node_num_2d = mesh_2d['corner_nodes']
r_outer = np.max(np.abs(nodes_2d[:, 1]))
num_angles = int(r_outer*2*np.pi/mesh_size)
angles = np.linspace(0, 2*np.pi, num_angles+1)[:-1]
delta_angle = angles[1]-angles[0]
# Calculate size of mesh and allocate variables
num_corner_nodes_2d = len(corner_node_num_2d)
num_edge_nodes_2d = len(edge_node_num_2d)
num_nodes_per_section = 2*num_corner_nodes_2d + num_edge_nodes_2d
    nodes = np.zeros((num_nodes_per_section*num_angles, 3), dtype=float)
    corner_node_num = np.zeros((num_corner_nodes_2d, num_angles), dtype=int)
    edge_ip_node_num = np.zeros((num_edge_nodes_2d, num_angles), dtype=int)
    edge_op_node_num = np.zeros((num_corner_nodes_2d, num_angles), dtype=int)
    edge_op_node_num[-1,-1] = -1 # Used in the first iteration of the loop
for i, ang in enumerate(angles):
# Corner nodes
corner_node_num[:, i] = edge_op_node_num[-1,i-1] + 1 + np.arange(num_corner_nodes_2d)
for j, num in enumerate(corner_node_num[:,i]):
coords_2d = nodes_2d[corner_node_num_2d[j], :]
nodes[num, :] = rotate_coords(coords_2d, ang)
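        # rotate_coords (defined elsewhere in this module) presumably revolves the 2d section
        # point (x, y) about the x-axis by ang, i.e. returns (x, y*cos(ang), y*sin(ang)).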
# Edge nodes (in plane)
        edge_ip_node_num[:, i] = corner_node_num[-1,i] + 1 + np.arange(num_edge_nodes_2d)
"""
Created on Thu Oct. 10 2019
Recent changes for the version 0.1.1:
1) Instead of giving the input optical penetration depth, only give the input
of the complex refractive index "n". This is a material parameter, so
the input is given in the simulation --> add_layer(.) command.
Now "LB" and "TMM" source are initialized almost in the same way
2) One of the Outputs of sim.run() is T. But now we change it to be a
3 dimensional array, with dim0 = time; dim1 = space; dim2 = subsystem
3) The input for the visual class in the v.contour() function should not be
a string but just numbers corresponding to different systems.
@author: <NAME>
<EMAIL>
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from bspline import Bspline
from bspline.splinelab import aptknt
import time
from matplotlib.animation import FuncAnimation as movie
from tqdm import tqdm #Progressbar
#==============================================================================
class temperature(object):
def __init__(self):
self.plt_points = 30 #number of points in x grid
self.length = np.array([0,0]) #length of x space,starting from 0
self.Left_BC_Type = 1 #Boundary conditions Default is Neumann
self.Right_BC_Type = 1 #1=> Neumann; 0=> Dirichlet
self.init = lambda x: 300+0*x # initial temperature of probe
self.n = np.array([1,1],dtype=complex) # Initial refractive index air|...|air
self.conductivity = [1] #This gets deleted after initialisation
self.heatCapacity = [1] #those values are just here to make space
self.rho = [1] #Actual values are given, when 'addLayer(length, conductivity,heatCapacity,rho)' is executed
self.collocpts = 12
        self.setup = False #first-time setup flag so matrices are not calculated twice
def getProperties(self): #to depict the properties of the object
for i in (self.__dict__):
name = str(i)
value = str(self.__dict__[i])
print('{:<20}{:>10}'.format( name,value ))
def __repr__(self):
return('Temperature')
#for every layer, a function to calculate the derivative of k(T)
def diff_conductivity(self,phi,num_of_material):
eps =1e-9
dc = (self.conductivity[num_of_material](phi+eps)-self.conductivity[num_of_material](phi))/eps
return(dc)
#Creating the key matrices for B-splines. Those are A0,A1,A2
#A0 => Zero derivative; A1 => 1st order derivative....
#We create the matices for every layer, with respective length ect
#then we put them together to Abig => Boundary and interface conditions are applied here.
def Msetup(self):
        #Deleting the first element of the default initialization
        #After creating the elements with 'addLayer' we don't need them anymore!
if not self.setup:
self.length = self.length[1:]
self.conductivity = self.conductivity[1:]
self.heatCapacity = self.heatCapacity[1:]
self.rho = self.rho[1:]
self.setup = True
        #Length and number of grid points for each respective layer
length = self.length
plt_points = self.plt_points
num_of_points = self.collocpts #Number of points per layer used in the spline for collocation
order = 5 #order of the spline
x = np.array(np.zeros([np.size(length)-1,num_of_points]))
x_plt = np.array(np.zeros([np.size(length)-1,plt_points]))
knot_vector = np.array(np.zeros([np.size(length)-1,num_of_points+order+1]))
basis = np.array(np.zeros(np.size(length)-1))
A0h = []; A1h = []; A2h = []; Ch = [];
LayerMat = np.array([np.zeros((num_of_points,num_of_points))])
        #Create all the big matrices A0,A1,A2 & C. C is used to map on a fine mesh in x- space.
#For every layer we set up splines between the boundaries
for i in range(0,np.size(length)-1):
x[i,:] = np.linspace(length[i], length[i+1] , num_of_points)
x_plt[i,:] = np.linspace(length[i], length[i+1] , plt_points)
knot_vector[i,:] = aptknt(x[i,:], order) #prepare for Spline matrix
basis = Bspline(knot_vector[i,:],order)
A0hinter = basis.collmat(x[i,:], deriv_order = 0); A0hinter[-1,-1] = 1
A1hinter = basis.collmat(x[i,:], deriv_order = 1); A1hinter[-1] = -np.flip(A1hinter[0],0)
A2hinter = basis.collmat(x[i,:], deriv_order = 2); A2hinter[-1,-1] = 1
Chinter = basis.collmat(x_plt[i,:], deriv_order = 0); Chinter[-1,-1] = 1
LayerMat = np.append(LayerMat,np.array([np.dot(A2hinter,np.linalg.inv(A0hinter))]),axis = 0)
A0h = np.append(A0h,A0hinter)
A1h = np.append(A1h,A1hinter)
A2h = np.append(A2h,A2hinter)
Ch = np.append(Ch,Chinter)
        #Reshape the long string of appended matrices, such that
        #rows: x-points; columns: i-th basis spline
LayerMat = LayerMat[1:,:,:]
A0h = np.reshape(A0h, (-1,num_of_points))
A1h = np.reshape(A1h, (-1,num_of_points))
A2h = np.reshape(A2h, (-1,num_of_points))
Ch = np.reshape(Ch,(-1,num_of_points))
#Ch => More points in x, but same number of basis splines
#Clearing the interface points, to not double count
N = num_of_points
plp = plt_points
interfaces = np.shape(x)[0]-1
sizeA = np.shape(x)[0]*N-interfaces
sizeCb = np.shape(x)[0]*plp-interfaces
Abig = np.zeros([sizeA,sizeA])
A1b = np.zeros([sizeA,sizeA])
A2b = np.zeros([sizeA,sizeA])
Cb = np.zeros([sizeCb,sizeA])
#Clearing the double counts from the space grid
xflat = x.flatten()
x_plt_flat = x_plt.flatten()
#index of double counts
doublec = np.array([np.arange(1,len(length)-1)])*N
doublec_plt = np.array([np.arange(1,len(length)-1)])*plp
xflat = np.delete(xflat,doublec)
x_plt_flat = np.delete(x_plt_flat,doublec_plt)
#Filling the big matrices.
startA = 0; endA = N-1
startC = 0; endC = plp-1
for i in range(0,interfaces+1):
Abig[startA:endA,startA:endA+1] = A0h[startA+i:endA+i,:]
A1b[startA:endA+1,startA:endA+1] = A1h[startA+i:endA+i+1,:]
A2b[startA:endA+1,startA:endA+1] = A2h[startA+i:endA+i+1,:]
Cb[startC:endC+1,startA:endA+1] = Ch[startC+i:endC+i+1,:]
startA += N-1; endA += N-1
startC += plp-1; endC += plp-1
#Create A00 with no interface condition to correctly compute phi in loop
        #The copy needs to be done before interface conditions are applied in Abig
A00 = Abig.copy()
A00[-1,-1] = 1;
        #Here we make init, conductivity & capacity all functions, in case they are
        # given as integers or floats. Also throw warnings if not every layer has a
        # conductivity or capacity ============================================
#Making init a function, in case it is given as a scalar
if np.size(self.init) == 1 and isinstance(self.init,(int,float)):
dummy = self.init
self.init = lambda x: dummy + 0*x
if len(length) > 2: #multilayer case
            if len(length)-1 != len(self.heatCapacity) or len(length)-1 != len(self.conductivity):
print('--------------------------------------------------------')
                print('The number of different layers must match the number of '\
                      'inputs for conductivity, heatCapacity and rho.')
print('--------------------------------------------------------')
            if np.size(self.conductivity) != interfaces+1:
print('--------------------------------------------------------')
                print('Not every layer has been given a conductivity function. '\
                      'Adjust your input of the conductivity functions with respect to the layers.')
print('--------------------------------------------------------')
            if np.size(self.heatCapacity) != interfaces+1:
print('--------------------------------------------------------')
                print('Not every layer has been given a heatCapacity function value. '\
                      'Adjust your input of the heatCapacity functions with respect to the layers.')
print('--------------------------------------------------------')
#Make Functions in case heat capacity/conductivity are given as variables
if (all(self.conductivity) or all(self.heatCapacity) or all(self.init)) == False:
print('No heatCapacity, conductivity or initial function given.')
print('--------------------------------------------------------')
#make the conductivity always a function
if len(length) >2 or np.size(self.conductivity)>=2:
for j in list(range (0,np.size(self.conductivity))):
if isinstance(self.conductivity[j],(int,float,list)) :
dummy3 = self.conductivity[j]
self.conductivity[j] = (lambda b: lambda a: b+0*a)(dummy3)
#make the conductivity always a function
for j in list(range (0,np.size(self.heatCapacity))):
if isinstance(self.heatCapacity[j],(int, float,list)) :
dummy4 = self.heatCapacity[j]
self.heatCapacity[j] = (lambda b: lambda a: b+0*a)(dummy4)
else :
if isinstance(self.conductivity[0],(int,float)):
dummy1 = self.conductivity
self.conductivity = [lambda phi: dummy1 + 0*phi]
if isinstance(self.heatCapacity[0],(int,float)):
dummy2 = self.heatCapacity
self.heatCapacity = lambda phi: dummy2 + 0*phi
self.heatCapacity = [self.heatCapacity]
#End of function creation for init(x), conductivity[l](phi), heatCapacity[l](phi)
# with respect to every layer 'l' =====================================
def interconditions(phi,interfaces):
N = num_of_points
end_i = N-1
intercondiL = np.zeros((interfaces,N))
intercondiR = np.zeros((interfaces,N))
for i in range(interfaces):
intercondiL[i] = self.conductivity[i](phi[end_i])*A1h[end_i+i]
intercondiR[i] = self.conductivity[i+1](phi[end_i])*A1h[end_i+i+1]
end_i += N-1
return(intercondiL,intercondiR)
#Initial Electron temperature
initphi = self.init(xflat)
initphi_large = self.init(x_plt_flat)
intercon = interconditions(initphi,interfaces)
        #filling up Abig with the interface condition in the middle of the grid
start_i = 0; end_i = N-1
for i in range(0,interfaces):
Abig[end_i,start_i:end_i] = intercon[0][i][:-1]#Lhs interface flow
Abig[end_i,end_i+1:end_i+N] = -intercon[1][i][1:]#Rhs interface flow
Abig[end_i,end_i] = intercon[0][i][-1] -intercon[1][i][0]
start_i += N-1; end_i += N-1
Abig[-1,-1] = 1 #to correct Cox algorithm
#Now Matrix Abig is completed and interface condition is applied.
#Treating 2 types of boundary conditions: 0=> Dirichlet; 1=> Neumann,
# where 0´th and -1´th row need to be first order derivatives for flux.
neumannBL = A1b[0].copy();
neumannBR = A1b[-1].copy();
if self.Left_BC_Type == 1: Abig[0] = -neumannBL
if self.Right_BC_Type == 1: Abig[-1] = neumannBR
#Clear for BC! (first and last row need to be cleared to correctly apply BC)
A1b[0] = 0; A2b[0] = 0;
A1b[-1] = 0; A2b[-1] = 0;
#Get inital c coefficients for splines using init (=phi_init)
c = np.dot(np.linalg.inv(A00),self.init(xflat))
#Passed on properties to the simulation class
return(c,A00,Abig,A1b,A2b,Cb,length,N,plp,xflat,x_plt_flat,initphi_large,interfaces,LayerMat,A1h)
def addLayer(self,L,refind,conductivity,heatCapacity,rho):
"""
Add parameters of every layer:
(length,conductivity[electron,lattice,spin],heatCapacity[electron, lattice, spin],density, coupling[E-L,L-S,S-E])
The units in SI are:
[length] = m
[n] = complex refractive index
[conductivity] = W/(mK)
[heatCapacity] = J/(m^3K^2)
[density] = kg/m^3
[Coupling] = W/(m^3K)
"""
self.length = np.append(self.length,self.length[-1]+L)
        #Squeeze in the refractive index between two layers of air: air|...|...|air
self.n = np.concatenate((self.n[:-1],[refind],[self.n[-1]]))
self.conductivity.append(conductivity)
self.heatCapacity.append(heatCapacity)
self.rho = np.append(self.rho,rho)
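        # Usage sketch (hypothetical values; units as listed in the docstring above):
        #   t = temperature()
        #   t.addLayer(20e-9, 2.9+3.0j, 315.0, lambda Te: 70.0*Te, 19.3e3)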
#==============================================================================
class simulation(object):
def __init__(self,num_of_temp,source):
self.temp_data = temperature() #import the temperatuer object
self.num_of_temp = num_of_temp #1 if only electron temp. 2 if electron and lattice temp.
self.start_time = 0 #starting time (can be negative)
self.final_time = 10 #time when simulation stops
self.time_step = [] #can either be given or is automatically calculated in stability
self.left_BC = 0 #function or constant what the boundary condition
self.right_BC = 0 #on the left or right side of the problem is.
self.stability_lim = [270,3000]
self.temp_data_Lat = [] #Default case is without lattice temperature
self.temp_data_Spin = []
if num_of_temp >= 2: #if Lattice temp is considered
self.temp_data_Lat = temperature() #in case also a lattice module is given
self.coupling = [] #Coupling between Electron and Lattice system
self.left_BC_L = 0 #Setting the default to zero flux
self.right_BC_L = 0 #The BC type is indicated in the temperature class
if num_of_temp == 3: #In case spin coupling is also considered
self.temp_data_Spin = temperature()
self.coupling_LS = [] #Coupling between Lattice and Spin system
self.coupling_SE = [] #Coupling between Electron and Spin system
self.left_BC_S = 0 #Default zero flux Neumann boundary conditions
self.right_BC_S = 0 #On both sides
self.source = source #object source can be passed on
#to depict the properties of the object
def getProperties(self):
for i in (self.__dict__):
name = str(i)
value = str(self.__dict__[i])
print('{:<20}{:>10}'.format( name,value ))
def __repr__(self):
return('Simulation')
def changeInit(self,system,function):
"""
Change the initial condition of every system.
.changeInit(system,function) has 2 input arguments
system --> string "electron" or "lattice" or "spin"
function --> a function handle or a number defining the value of the
system at t=0 over the entire domain x.
"""
if (system == "electron") or (system == "Electron") or (system == 1):
self.temp_data.init = function
if (system == "lattice") or (system == "Lattice") or (system == 2):
self.temp_data_Lat.init = function
if (system == "spin") or (system == "Spin") or (system == 3):
            self.temp_data_Spin.init = function
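        # e.g. sim.changeInit("electron", lambda x: 300.0 + 0*x)   (sketch; 'sim' is a simulation instance)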
def changeBC_Type(self,system,side,BCType):
"""
Function to change the type of the boundary condition on the left and
right side of the material, for every system, "electron", "lattice", "spin"
respectively.
.changeBC_Type(system,side,BCType) has 3 inputs, all of them are strings.
system --> "electron" or "lattice" or "spin". Altenatively: "1", "2", "3"
side --> "left" or "right"
BCType --> "dirichlet" fixing the value/ "neumann" fixing the flux.
"""
if (system == "electron") or (system == "Electron") or (system == 1):
if side == "left":
if (BCType == "dirichlet") or (BCType == 0):
self.temp_data.Left_BC_Type = 0
if (BCType == "neumann") or (BCType == 1):
self.temp_data.Left_BC_Type = 1
if side == "right":
if (BCType == "dirichlet") or (BCType == 0):
self.temp_data.Right_BC_Type = 0
if (BCType == "neumann") or (BCType == 1):
self.temp_data.Right_BC_Type = 1
if (system == "lattice") or (system == "Lattice") or (system == 2):
if side == "left":
if (BCType == "dirichlet") or (BCType == 0):
self.temp_data_Lat.Left_BC_Type = 0
if (BCType == "neumann") or (BCType == 1):
self.temp_data_Lat.Left_BC_Type = 1
if side == "right":
if (BCType == "dirichlet") or (BCType == 0):
self.temp_data_Lat.Right_BC_Type = 0
if (BCType == "neumann") or (BCType == 1):
self.temp_data_Lat.Right_BC_Type = 1
if (system == "spin") or (system == "Spin") or (system == 3):
print("Line 326 Spinsystem")
if side == "left":
if (BCType == "dirichlet") or (BCType == 0):
self.temp_data_Spin.Left_BC_Type = 0
if (BCType == "neumann") or (BCType == 1):
self.temp_data_Spin.Left_BC_Type = 1
if side == "right":
if (BCType == "dirichlet") or (BCType == 0):
self.temp_data_Spin.Right_BC_Type = 0
if (BCType == "neumann") or (BCType == 1):
self.temp_data_Spin.Right_BC_Type = 1
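        # e.g. sim.changeBC_Type("lattice", "left", "dirichlet")   (sketch; 'sim' is a simulation instance)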
def changeBC_Value(self,system,side,function):
"""
Function to change the value of the boundary condition on the left and
right side of the material, for every system, "electron", "lattice", "spin"
respectively.
.changeBC_Value(system,side,function) the first two are strings,
the last one is a function handle or a number.
system --> "electron" or "lattice" or "spin"| Altenatively: "1", "2", "3"
side --> "left" or "right"
function--> function or number fixing the value on the boundaries for all times.
"""
if (system == "electron") or (system == "Electron") or (system == 1):
if side == "left":
self.left_BC = function
if side == "right":
self.right_BC = function
if (system == "lattice") or (system == "Lattice") or (system == 2):
if side == "left":
self.left_BC_L = function
if side == "right":
self.right_BC_L = function
if (system == "spin") or (system == "Spin") or (system == 3):
if side == "left":
self.left_BC_S = function
if side == "right":
self.right_BC_S = function
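        # e.g. sim.changeBC_Value("electron", "right", lambda t: 300.0 + 0*t)   (sketch; fixes the right-hand value for all times)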
def addSubstrate(self,name = "silicon"):
"""
        Automatically create the silicon substrate using input
parameters, mostly taken from:
Contribution of the electron-phonon interaction
to Lindhard energy partition at low energy in Ge and Si
detectors for astroparticle physics applications, by
<NAME> and <NAME>
Note: Refractive index for 400 nm light!
"""
if (name == "Silicon") or (name =="silicon") or (name =="Si"):
k_el_Si = 130#W/(m*K);
k_lat_Si = lambda T: np.piecewise(T,[T<=120.7,T>120.7],\
[lambda T: 100*(0.09*T**3*(0.016*np.exp(-0.05*T)+np.exp(-0.14*T))),
lambda T: 100*(13*1e3*T**(-1.6))])
rho_Si = 2.32e3#kg/(m**3)
C_el_Si = lambda Te: 150/rho_Si *Te
C_lat_Si = 1.6e6/rho_Si
G_Si = 1e17*18#W/(m**3*K)
#Set three layers of Silicon after each other.
#The space resolution on the Film|Substrate edge is high
#and decreases as one moves in bulk direction
if self.num_of_temp == 2:#Lattice only in the 2T
self.temp_data_Lat.addLayer(20e-9,5.5674+0.38612j,k_lat_Si,C_lat_Si,rho_Si)
self.coupling = np.append(self.coupling,G_Si)
self.temp_data_Lat.addLayer(100e-9,5.5674+0.38612j,k_lat_Si,C_lat_Si,rho_Si)
self.coupling = np.append(self.coupling,G_Si)
self.temp_data_Lat.addLayer(100000e-9,5.5674+0.38612j,k_lat_Si,C_lat_Si,rho_Si)
self.coupling = np.append(self.coupling,G_Si)
#In the 1 and 2 temperature case electron always gets appended
self.temp_data.addLayer(20e-9,5.5674+0.38612j,k_el_Si,C_el_Si,rho_Si)
self.temp_data.addLayer(100e-9,5.5674+0.38612j,k_el_Si,C_el_Si,rho_Si)
self.temp_data.addLayer(100000e-9,5.5674+0.38612j,k_el_Si,C_el_Si,rho_Si)
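        # e.g. sim.addSubstrate("Si")   (sketch; appends the three silicon layers listed above)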
def addLayer(self,L,n,conductivity,heatCapacity,rho,coupling=0,*args):
"""
Add parameters of every layer:
(length,conductivity[electron,lattice,spin],heatCapacity[electron, lattice, spin],density, coupling[E-L,L-S,S-E])
The units in SI are:
[length] = m
[n] = complex refractive index
[conductivity] = W/(mK)
[heatCapacity] = J/(m^3K^2)
[density] = kg/m^3
[Coupling] = W/(m^3K)
"""
#check all input arguments and make them to lists, for the multi layer case
#make list when given as int or float
typecheck = np.array([])
        if not isinstance(conductivity, (list, np.ndarray)):
conductivity = [conductivity]
        if not isinstance(heatCapacity, (list, np.ndarray)):
heatCapacity = [heatCapacity]
#do typecheck only for the lattice system in the 2TM-case
if self.num_of_temp == 2:
            if np.size(conductivity) < 2 or np.size(heatCapacity) < 2:
print('Lattice parameters are missing.\n Add parameters for Lattice system.')
return(128)
self.temp_data_Lat.addLayer(L,n,conductivity[1],heatCapacity[1],rho)
            #Only the electron-lattice coupling (no spin) is under consideration
self.coupling = np.append(self.coupling,coupling)
#do typecheck for the Lattice and the Spin system
if self.num_of_temp == 3:
            if np.size(conductivity) < 3 or np.size(heatCapacity) < 3 or np.size(coupling) < 3:
print('Input parameters are missing.\n Add parameters for '\
'conductivity/heatCapacity or coupling for Lattice/Spin system.')
return(128)
self.temp_data_Lat.addLayer(L,n,conductivity[1],heatCapacity[1],rho)
self.temp_data_Spin.addLayer(L,n,conductivity[2],heatCapacity[2],rho)
#In the 3Tm case the coupling input arg is a vector of len 3. Unwrap them:
self.coupling = np.append(self.coupling,coupling[0])
self.coupling_LS = np.append(self.coupling_LS,coupling[1])
self.coupling_SE = np.append(self.coupling_SE,coupling[2])
#For the electronic system always add the parameters!
self.temp_data.addLayer(L,n,conductivity[0],heatCapacity[0],rho)
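        # Sketch for a two-temperature layer (hypothetical parameter names):
        #   sim.addLayer(20e-9, 2.9+3.0j, [k_el, k_lat], [C_el, C_lat], rho, coupling=G)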
def interconditions(self,phi,interfaces,conductivity,N,A1h):
"""
A function which gives back an array where the intereface condition is returned
for the left and right side of the interface. Gets called in the E.E.-loop.
"""
end_i = N-1
intercondiL = np.zeros((interfaces,N))
intercondiR = np.zeros((interfaces,N))
for i in range(interfaces):
intercondiL[i] = conductivity[i](phi[end_i])*A1h[end_i+i]
intercondiR[i] = conductivity[i+1](phi[end_i])*A1h[end_i+i+1]
end_i += N-1
return(intercondiL,intercondiR)
def sourceprofile(self,absorptionprofile,timeprofile,xflat,x0,t,N):
#Consider Lambert Beers law in space and different types in time
if (absorptionprofile == "LB") and (self.source.fluence is not 0):
optical_penetration_depth = self.source.ref2delta(self.temp_data.n,self.source.lambda_vac)
if (timeprofile == "Gaussian"):
print('-----------------------------------------------------------')
print('Lambert Beer´s absorption law and a Gaussian time profile is applied as source.')
print('-----------------------------------------------------------')
sourceM = self.source.init_G_source(xflat,x0,t,optical_penetration_depth,N,self.source.Gaussian)
if (timeprofile == "repGaussian") or (timeprofile == "RepGaussian"):
print('-----------------------------------------------------------')
print('Lambert Beer absorption profile and a repeated Gaussian time profile is taken into account for the source.'\
'The frequency of the pulse repetition has to be indicated via s.frequency = number (in 1/seconds).')
print('-----------------------------------------------------------')
self.source.multipulse = True
xmg, tmg = np.meshgrid(xflat,t)
if (self.source.frequency is not False):
time_range = tmg[-1,-1]-self.source.t0
pulses = int(round(time_range * self.source.frequency))
#Add up Gaussian pulses with different t0, according to the frequency given
#from t0 onwards, until the end of the time grid
customtime = np.zeros(np.shape(tmg))
for i in range(0,pulses):
t00 = self.source.t0 + i/self.source.frequency
customtime +=np.exp(-(tmg-t00)**2*np.log(2)/(self.source.FWHM**2))
sourceM = self.source.init_G_source(xflat,x0,t,optical_penetration_depth,N,self.source.Gaussian,customtime)
if(self.source.frequency is not False) and (self.source.num_of_pulses is not False):
#Creating a certain number of pulses according to self.num_of_pulses
time_range = tmg[-1,-1]-self.source.t0
pulses = self.source.num_of_pulses
                    #If num_of_pulses is too big to fit in the time range [t0,t_end], throw a warning
if (pulses > int(round(time_range * self.source.frequency))):
pulses = int(round(time_range * self.source.frequency))
print('Number of pulses is too big to fit in the timerange under consideration. \n'\
'Adjust t_end or consider a smaller number of pulses.')
customtime = np.zeros(np.shape(tmg))
for i in range(0,pulses):
t00 = self.source.t0 +i/self.source.frequency
customtime +=np.exp(-(tmg-t00)**2*np.log(2)/(self.source.FWHM**2))
sourceM = self.source.init_G_source(xflat,x0,t,optical_penetration_depth,N,self.source.Gaussian,customtime)
if(self.source.frequency is False) and (self.source.num_of_pulses is False):
print('-----------------------------------------------------------')
                    print('Assign the property s.frequency to consider a certain pulse frequency.\n'\
'If only a certain number of pulses should be considered, assign the value s.num_of_pulses = integer.')
print('-----------------------------------------------------------')
if (timeprofile == "custom") or (timeprofile == "Custom"):
[ttime,amplitude] = self.source.loadData
#To extract the custom time profile and the scaling factor
[sourcemat,customtime,scaling] = self.source.custom(t,xflat,ttime,amplitude,optical_penetration_depth[0])
#To get the space profile: Source with different optical penetration depth defined on the xflat gird
sourceM = self.source.init_G_source(xflat,x0,t,optical_penetration_depth,N,self.source.Gaussian,customtime,scaling)
#Consider Transfer Matrix in space and different types in time
if (absorptionprofile == "TMM") and (self.source.fluence is not 0):
"""
This will implement a transfer matrix approach to local absorption
instead as using the Lambert Beer´s law considered in the Gaussian
source type.
"""
            #Multiplying with 1e9, since the absorption()-function in the source module only works if the length is given in units of nm!
            x0m = x0*1e9 #convert the length into nm
            if len(x0) != (len(self.temp_data.n)-1):
print('-----------------------------------------------------------')
                print('Number of considered layers does not match the given refractive indices.\n'\
                      'In ´temperature.n(Air|Film layer1|Film layer2|...|Air)´ only consider the film layers. \n'\
                      'The refractive index of the substrate gets added automatically later when \n'\
                      '`simulation.addSubstrate(\'name\')` gets called.')
print('-----------------------------------------------------------')
if (timeprofile == "Gaussian"):
sourceM = self.source.createTMM(self.temp_data.n,xflat,t,x0m)
print('-----------------------------------------------------------')
print('Transfer matrix absorption profile and a Gaussian time profile is taken into account for the source.\n'\
'Length of every layer has to be given in units of meter.')
print('-----------------------------------------------------------')
if (timeprofile == "custom") or (timeprofile == "Custom"):
print('-----------------------------------------------------------')
print('Transfer matrix absorption profile of and a custom time profile is taken into account for the source.\n'\
'Length of every layer has to be given in units of meter.')
print('-----------------------------------------------------------')
if self.source.loadData is False:
print('-----------------------------------------------------------')
print('Import an array, containing the data of the custom pulse.'\
'arr[0,:] = time; arr[1,:] = amplitude')
print('-----------------------------------------------------------')
[ttime,amplitude] = self.source.loadData
lam = 1#Lamda does not matter here since the spacial absorption is calculated via TMM
[sourceM,customtime,scaling] = self.source.custom(t,xflat,ttime,amplitude,lam)
                #The createTMM(xgrid,timegrid,length,*args) has customtime as an optional argument
sourceM = self.source.createTMM(self.temp_data.n,xflat,t,x0m,customtime,scaling)
if (timeprofile == "RepGaussian") or (timeprofile== "repGaussian"):
print('-----------------------------------------------------------')
print('Transfer matrix absorption profile and a repeated Gaussian time profile is taken into account for the source.'\
'Length of every layer has to be given in units of meter.')
print('-----------------------------------------------------------')
self.source.multipulse = True
xmg, tmg = np.meshgrid(xflat,t)
if (self.source.frequency is not False):
time_range = tmg[-1,-1]-self.source.t0
pulses = int(round(time_range * self.source.frequency))
#Add up Gaussian pulses with different t0, according to the frequency given
#from t0 onwards, until the end of the time grid
customtime = np.zeros(np.shape(tmg))
for i in range(0,pulses):
t00 = self.source.t0 + i/self.source.frequency
customtime +=np.exp(-(tmg-t00)**2*np.log(2)/(self.source.FWHM**2))
sourceM = self.source.createTMM(self.temp_data.n,xflat,t,x0m,customtime)
if(self.source.frequency is not False) and (self.source.num_of_pulses is not False):
#Creating a certain number of pulses according to self.num_of_pulses
time_range = tmg[-1,-1]-self.source.t0
pulses = self.source.num_of_pulses
                    #If num_of_pulses is too big to fit in the time range [t0,t_end], throw a warning
if (pulses > int(round(time_range * self.source.frequency))):
pulses = int(round(time_range * self.source.frequency))
print('Number of pulses is too big to fit in the timerange under consideration. \n'\
'Adjust t_end or consider a smaller number of pulses.')
customtime = np.zeros(np.shape(tmg))
for i in range(0,pulses):
t00 = self.source.t0 +i/self.source.frequency
customtime +=np.exp(-(tmg-t00)**2*np.log(2)/(self.source.FWHM**2))
sourceM = self.source.createTMM(self.temp_data.n,xflat,t,x0m,customtime)
if(self.source.frequency is False) and (self.source.num_of_pulses is False):
print('-----------------------------------------------------------')
                    print('Assign the property s.frequency to consider a certain pulse frequency.\n'\
'If only a certain number of pulses should be considered, assign the value s.num_of_pulses = integer.')
print('-----------------------------------------------------------')
return(sourceM)
# This is the main Explicit Euler loop where the solution to T(x,t) is calculated.
def run(self):
idealtimestep = self.stability()
if not self.time_step:
self.time_step = idealtimestep
print('-----------------------------------------------------------')
print(' No specific time constant has been indicated. \n '\
'The stability region has been calculated and an appropriate timestep has been chosen.\n '\
'Timestep = {idealtimestep:.2e} s'.format(idealtimestep=idealtimestep))
print('-----------------------------------------------------------')
if (self.time_step-idealtimestep)/idealtimestep > 0.1:
print('-----------------------------------------------------------')
            print('The manually chosen time step of {time_step:.2e} s may be too big and could cause instabilities in the simulation.\n '\
'We suggest a timestep of {idealtimestep:.2e} s'.format(time_step=self.time_step,idealtimestep=idealtimestep))
print('-----------------------------------------------------------')
if(self.time_step-idealtimestep)/idealtimestep < -0.2:
print('-----------------------------------------------------------')
            print('The manually chosen time step of {time_step:.2e} s is very small and may cause a long simulation time.\n'\
'We suggest a timestep of {idealtimestep:.2e} s'.format(time_step=self.time_step,idealtimestep=idealtimestep))
print('-----------------------------------------------------------')
        #loading simulation relevant properties from the structural temperature object
[c_E,A00,Abig,A1b,A2b,Cb,length,N,plp,xflat,x_plt_flat,initphi_large,interfaces,LayerMat,A1h] = self.temp_data.Msetup()
t = np.arange(self.start_time,self.final_time,self.time_step)
        #only if the injection would make the time grid smaller, to not move into an unstable regime
if self.source.FWHM:
if (6*self.source.FWHM/200 < idealtimestep):
#inject 200 extra points around pulse to fully capture the shape of the pulse
tinj = np.linspace(self.source.t0 - 3*self.source.FWHM,self.source.t0 + 3*self.source.FWHM,200)
smaller = np.where(t<self.source.t0 - 3*self.source.FWHM)[0]
bigger = np.where(t>self.source.t0 + 3*self.source.FWHM)[0]
#new time grid with higher resolution
t = np.concatenate((t[smaller],tinj,t[bigger]),axis=0)
tstep = np.ones(len(t))
tstep[:-1] = np.diff(t); tstep[-1] = np.diff(t)[-1]
        #If a more refined grid is chosen around t0. We inject a fine time grid around t0, to correctly capture the pulse shape
if self.source.adjusted_grid is not False:
if self.source.dt0 == False:
print('-----------------------------------------------------------')
                print('The option for an adjusted grid is True, but no interval for a more refined grid has been given. '\
                      'Indicate dt0 (around which values the time grid should have higher resolution) in the source object')
print('-----------------------------------------------------------')
if 2*self.source.dt0/self.source.extra_points < idealtimestep:
print('-----------------------------------------------------------')
print('A refined Grid around t0 has been applied')
print('-----------------------------------------------------------')
tinj = np.linspace(self.source.t0-self.source.dt0,self.source.t0+self.source.dt0,self.source.extra_points)
smaller = np.where(t<self.source.t0 - self.source.dt0)[0]
bigger = np.where(t>self.source.t0 + self.source.dt0)[0]
#new time grid with higher resolution
t = np.concatenate((t[smaller],tinj,t[bigger]),axis=0)
tstep = np.ones(len(t))
tstep[:-1] = np.diff(t); tstep[-1] = np.diff(t)[-1]
else:
print('-----------------------------------------------------------')
                print('No refined time grid is applied. The timestep is already very small. ' \
'You can use the simulation class with the property self.time_step and '\
'assign it to a smaller value as the current time step.')
print('-----------------------------------------------------------')
#Initialize the systems and load the matrices
if self.temp_data_Lat:
            if self.temp_data.plt_points != self.temp_data_Lat.plt_points:
self.temp_data_Lat.plt_points = self.temp_data.plt_points
print('-----------------------------------------------------------')
print('The number of plotting points in the electron system \n'\
'is not the same as in the lattice system.\n'\
                      'They are set equal to avoid matrix dimension mismatch.')
print('-----------------------------------------------------------')
            if self.temp_data.collocpts != self.temp_data_Lat.collocpts:
self.temp_data_Lat.collocpts = self.temp_data.collocpts
print(self.temp_data_Lat.collocpts)
print('-----------------------------------------------------------')
print('The number of collocation points in the electron system \n'\
'is not the same as in the lattice system.\n'\
                      'They are set equal to avoid matrix dimension mismatch.')
print('-----------------------------------------------------------')
[c_L,A00,Abig,A1b,A2b,Cb,length,N,plp,xflat,x_plt_flat,initphi_large_L,interfaces,LayerMat,A1h] = self.temp_data_Lat.Msetup()
if self.temp_data_Spin:
print("Line 728 Spinsystem")
            if self.temp_data.plt_points != self.temp_data_Spin.plt_points:
self.temp_data_Spin.plt_points = self.temp_data.plt_points
print('-----------------------------------------------------------')
print('The number of plotting points in the electron system \n'\
'is not the same as in the spin system.\n'\
                      'They are set equal to avoid matrix dimension mismatch.')
print('-----------------------------------------------------------')
            if self.temp_data.collocpts != self.temp_data_Spin.collocpts:
self.temp_data_Spin.collocpts = self.temp_data.collocpts
print('-----------------------------------------------------------')
print('The number of collocation points in the electron system \n'\
'is not the same as in the spin system.\n'\
                      'They are set equal to avoid matrix dimension mismatch.')
print('-----------------------------------------------------------')
[c_S,A00,Abig,A1b,A2b,Cb,length,N,plp,xflat,x_plt_flat,initphi_large_S,interfaces,LayerMat,A1h] = self.temp_data_Spin.Msetup()
if (self.source.fluence == 0):
print('-----------------------------------------------------------')
print('No source is applied.\n'\
'source.fluence = 0')
print('-----------------------------------------------------------')
xmg, tmg = np.meshgrid(xflat,t)
sourceM = np.zeros_like(xmg)
else:
sourceM = self.sourceprofile(self.source.spaceprofile,self.source.timeprofile,xflat,self.temp_data.length,t,N)
#Making the boundary conditions a function of t, in case they are given as scalars
if isinstance(self.left_BC,(int,float)):
dummy = self.left_BC
self.left_BC = lambda t: dummy + 0*t
if isinstance(self.right_BC,(int,float)):
dummy1 = self.right_BC
self.right_BC = lambda t: dummy1 + 0*t
        #Making the boundary conditions a matrix for the electron case
BC_E = np.zeros((len(c_E),len(t)))
BC_E[0] = self.left_BC(t)
BC_E[-1] = self.right_BC(t)
#Checking the Lattice system boundary conditions
if self.temp_data_Lat:
if isinstance(self.left_BC_L,(int,float)):
dummy2 = self.left_BC_L
self.left_BC_L = lambda t: dummy2 + 0*t
if isinstance(self.right_BC_L,(int,float)):
dummy3 = self.right_BC_L
self.right_BC_L = lambda t: dummy3 + 0*t
            #Making the boundary conditions a matrix for the lattice case
BC_L = np.zeros((len(c_L),len(t)))
BC_L[0] = self.left_BC_L(t)
BC_L[-1] = self.right_BC_L(t)
        #Checking the Spin system boundary conditions
        #It implies that we at least consider 2 temperatures -> under this "if-tree"
if self.temp_data_Spin:
if isinstance(self.left_BC_S,(int,float)):
dummy4 = self.left_BC_S
self.left_BC_S = lambda t: dummy4 + 0*t
if isinstance(self.right_BC_S,(int,float)):
dummy5 = self.right_BC_S
self.right_BC_S = lambda t: dummy5 + 0*t
            #Making the boundary conditions a matrix for the Spin case
BC_S = np.zeros((len(c_S),len(t)))
BC_S[0] = self.left_BC_S(t)
BC_S[-1] = self.right_BC_S(t)
#Check if the Lattice/Spin and Spin/Electron coupling constants have the right size
if np.size(self.coupling_LS)<np.size(length)-1:
self.coupling_LS = self.coupling_LS*np.ones(np.size(self.temp_data.length)-1)
print('-----------------------------------------------------------')
            print('Not every layer has a unique Lattice-Spin coupling constant \'G_LS\'.\n'\
                  '=> G_LS will be set to the value of the first layer = {coupling_LS[0]:.2e}\n for all other layers.'.format(coupling_LS=self.coupling_LS))
print('-----------------------------------------------------------')
if np.size(self.coupling_SE)<np.size(length)-1:
self.coupling_SE = self.coupling_SE*np.ones(np.size(self.temp_data.length)-1)
print('-----------------------------------------------------------')
            print('Not every layer has a unique Spin-Electron coupling constant \'G_SE\'.\n'\
                  '=> G_SE will be set to the value of the first layer = {coupling_SE[0]:.2e}\n for all other layers.'.format(coupling_SE=self.coupling_SE))
print('-----------------------------------------------------------')
#If only the two temperature model is considered I only need to check one coupling constant
if np.size(self.coupling)<np.size(length)-1:
self.coupling = self.coupling*np.ones(np.size(self.temp_data.length)-1)
print('-----------------------------------------------------------')
            print('Not every layer has a unique coupling constant \'G\'.\n'\
                  '=> G will be set to the value of the first layer = {coupling[0]:.2e}\n for all other layers.'.format(coupling=self.coupling))
print('-----------------------------------------------------------')
# The 3 Temperature Case is being considered
if self.temp_data_Spin:
#Setup arrays for electron temperature
phi_E = np.zeros((len(t),len(x_plt_flat))); phi_E[0] = initphi_large
Flow_1E = np.zeros(len(c_E))
Flow_2E = np.zeros(len(c_E))
dphi_E = np.zeros(len(c_E))
intphi_E = np.zeros(len(c_E))
#Setup arrays for lattice temperature
phi_L = np.zeros((len(t),len(x_plt_flat))); phi_L[0] = initphi_large_L #300*np.ones(len(phi_L[0]))
Flow_1L = np.zeros(len(c_L))
Flow_2L = np.zeros(len(c_L))
dphi_L = np.zeros(len(c_L))
intphi_L = np.zeros(len(c_L))
#Setup arrays for the spin temperature
phi_S = np.zeros((len(t),len(x_plt_flat))); phi_S[0] = initphi_large_S #300*np.ones(len(phi_L[0]))
Flow_1S = np.zeros(len(c_S))
Flow_2S = np.zeros(len(c_S))
dphi_S = np.zeros(len(c_S))
intphi_S = np.zeros(len(c_S))
#General setup for E.E. loop
condi = np.array([np.arange(1,len(length)-1)])*(N-1) #Index to apply interface condition
cnfill = np.array([np.arange(1,len(length)-1)])*(plp-1)#correct interface condition with real value for phi
            A00[0] = 1; A00[-1] = 1 #Avoid division by 0 in dphi_L! Clear for BC before intphi calc.
Abig_E = np.copy(Abig) #Since Abig can change due to interconditions we split it here
Abig_L = np.copy(Abig) #The interface conditions are applied on every time step
Abig_S = np.copy(Abig) #Every system gets individual matrix
start_EL = time.time()
for i in tqdm(range(1,len(t)),position = 0):
#Calculate Solution at every time step and respective derivatives
phi0_E = np.dot(A00,c_E); phi1_E = np.dot(A1b,c_E); phi2_E = np.dot(A2b,c_E)
phi0_L = np.dot(A00,c_L); phi1_L = np.dot(A1b,c_L); phi2_L = np.dot(A2b,c_L)
phi0_S = np.dot(A00,c_S); phi1_S = np.dot(A1b,c_S); phi2_S = np.dot(A2b,c_S)
#Calculating interface conditions which are applied later
intercon_E = self.interconditions(phi_E[i-1],interfaces,self.temp_data.conductivity,N,A1h)
intercon_L = self.interconditions(phi_L[i-1],interfaces,self.temp_data_Lat.conductivity,N,A1h)
intercon_S = self.interconditions(phi_S[i-1],interfaces,self.temp_data_Spin.conductivity,N,A1h)
startf = 0;endf = N-1
#Construct all piecewise flows and piecewise dphi. Iterate over layers
for j in range(0,interfaces+1):
#electron: d/dx[k(phi) * d/dx(phi)]
Flow_1E[startf:endf] = self.temp_data.diff_conductivity(phi0_E[startf:endf],j)
Flow_2E[startf:endf] = self.temp_data.conductivity[j](phi0_E[startf:endf])
Flow_1E[startf:endf] *=phi1_E[startf:endf]**2
Flow_2E[startf:endf] *= phi2_E[startf:endf]
#lattice
Flow_1L[startf:endf] = self.temp_data_Lat.diff_conductivity(phi0_L[startf:endf],j)
Flow_2L[startf:endf] = self.temp_data_Lat.conductivity[j](phi0_L[startf:endf])
Flow_1L[startf:endf] *=phi1_L[startf:endf]**2
Flow_2L[startf:endf] *= phi2_L[startf:endf]
#Spin
Flow_1S[startf:endf] = self.temp_data_Spin.diff_conductivity(phi0_S[startf:endf],j)
Flow_2S[startf:endf] = self.temp_data_Spin.conductivity[j](phi0_S[startf:endf])
Flow_1S[startf:endf] *=phi1_S[startf:endf]**2
Flow_2S[startf:endf] *= phi2_S[startf:endf]
#calculate delta phi for electron, lattice and spin
#This is the core of the problem
dphi_E[startf:endf] = 1/(self.temp_data.heatCapacity[j](phi0_E)[startf:endf]*self.temp_data.rho[j])*\
(Flow_1E[startf:endf]+Flow_2E[startf:endf]+sourceM[i,startf:endf] +\
self.coupling[j]*(phi0_L[startf:endf]-phi0_E[startf:endf])+self.coupling_SE[j]*(phi0_S[startf:endf]-phi0_E[startf:endf]))
#Lattice time derivative
dphi_L[startf:endf] = 1/(self.temp_data_Lat.heatCapacity[j](phi0_L)[startf:endf]*self.temp_data_Lat.rho[j])*\
(Flow_1L[startf:endf]+Flow_2L[startf:endf] +\
self.coupling[j]*(phi0_E[startf:endf]-phi0_L[startf:endf])+self.coupling_LS[j]*(phi0_S[startf:endf]-phi0_L[startf:endf]))
#Spin system time derivative
dphi_S[startf:endf] = 1/(self.temp_data_Spin.heatCapacity[j](phi0_S)[startf:endf]*self.temp_data_Spin.rho[j])*\
(Flow_1S[startf:endf]+Flow_2S[startf:endf] +\
self.coupling_LS[j]*(phi0_L[startf:endf]-phi0_S[startf:endf])+self.coupling_SE[j]*(phi0_E[startf:endf]-phi0_S[startf:endf]))
startf += N-1; endf +=N-1 #Move one layer further
start_i = 0; end_i = N-1
#Apply interface conditions for all layers in every time step, i.e.:
#filling up Abig with the interface condition in the middle of the grid
for k in range(0,interfaces):
#for the electron system
Abig_E[end_i,start_i:end_i] = intercon_E[0][k][:-1]#Lhs interface flow
Abig_E[end_i,end_i+1:end_i+N] = -intercon_E[1][k][1:]#Rhs interface flow
Abig_E[end_i,end_i] = intercon_E[0][k][-1] -intercon_E[1][k][0]
#for the lattice system
Abig_L[end_i,start_i:end_i] = intercon_L[0][k][:-1]#Lhs interface flow
Abig_L[end_i,end_i+1:end_i+N] = -intercon_L[1][k][1:]#Rhs interface flow
Abig_L[end_i,end_i] = intercon_L[0][k][-1] -intercon_L[1][k][0]
#for the Spin system
Abig_S[end_i,start_i:end_i] = intercon_S[0][k][:-1]#Lhs interface flow
Abig_S[end_i,end_i+1:end_i+N] = -intercon_S[1][k][1:]#Rhs interface flow
Abig_S[end_i,end_i] = intercon_S[0][k][-1] -intercon_S[1][k][0]
start_i += N-1; end_i += N-1
#computing the flux for every time step at the boundaries
#If Neumann BC -> divide by k(T) since BC_Type = 1
#If Dirichlet BC -> divide by 1 since BC_Type = 0
Flux_E = BC_E[:,i] #the +1e-12 below avoids a 0 in the denominator
Flux_E[0] /= self.temp_data.conductivity[0](c_E[0])**self.temp_data.Left_BC_Type + 1e-12
Flux_E[-1] /= self.temp_data.conductivity[-1](c_E[-1])**self.temp_data.Right_BC_Type + 1e-12
Flux_L = BC_L[:,i]
Flux_L[0] /= self.temp_data_Lat.conductivity[0](c_L[0])**self.temp_data_Lat.Left_BC_Type + 1e-12
Flux_L[-1] /= self.temp_data_Lat.conductivity[-1](c_L[-1])**self.temp_data_Lat.Right_BC_Type + 1e-12
Flux_S = BC_S[:,i]
Flux_S[0] /= self.temp_data_Spin.conductivity[0](c_S[0])**self.temp_data_Spin.Left_BC_Type + 1e-12
Flux_S[-1] /= self.temp_data_Spin.conductivity[-1](c_S[-1])**self.temp_data_Spin.Right_BC_Type + 1e-12
#Clear for boundary conditions at the edges of the grid
dphi_E[0] = 0; dphi_E[-1] = 0;
phi0_E[0] = 0; phi0_E[-1] = 0;
dphi_L[0] = 0; dphi_L[-1] = 0;
phi0_L[0] = 0; phi0_L[-1] = 0;
dphi_S[0] = 0; dphi_S[-1] = 0;
phi0_S[0] = 0; phi0_S[-1] = 0;
#intermediate phi with low resolution in space according to explicit euler
intphi_E = phi0_E + tstep[i] * dphi_E + Flux_E
intphi_L = phi0_L + tstep[i] * dphi_L + Flux_L
intphi_S = phi0_S + tstep[i] * dphi_S + Flux_S
#Interface condition: Setting the rhs to 0, such that the heat transported (flux = Q = k*d/dx phi)
#from left is what comes out at the right hand side Q_1 -> Q_2
intphi_E[condi] = 0 #Interface condition: Q_1 -Q_2 = 0
intphi_L[condi] = 0
intphi_S[condi] = 0
#electron: use c to map on high resolution x-grid
#since in Abig, k(T(t)) is inserted we have to solve the system for every step
c_E = np.linalg.solve(Abig_E,intphi_E) # c(t) for every timestep
phi_E[i] = np.dot(Cb,c_E) # map spline coefficients to fine Cb grid
phi_E[i,cnfill] = c_E[condi] #correct the values for phi at interface
#lattice
c_L = np.linalg.solve(Abig_L,intphi_L)
phi_L[i] = np.dot(Cb,c_L)
phi_L[i,cnfill] = c_L[condi]
#spin
c_S = np.linalg.solve(Abig_S,intphi_S)
phi_S[i] = np.dot(Cb,c_S)
phi_S[i,cnfill] = c_S[condi]
end_EL = time.time()
print('-----------------------------------------------------------')
print('Heat diffusion in a coupled electron-lattice-spin system has been simulated')
print('Elapsed time in E.E.- loop:', end_EL-start_EL)
print('-----------------------------------------------------------')
T = []
T.append(phi_E); T.append(phi_L); T.append(phi_S)
return(x_plt_flat,t,T)
#=======End 3 temp Case =================================
#The two temperature model is considered
if self.temp_data_Lat:
#Setup arrays for electron temperature
phi_E = np.zeros((len(t),len(x_plt_flat))); phi_E[0] = initphi_large
Flow_1E = np.zeros(len(c_E))
Flow_2E = np.zeros(len(c_E))
dphi_E = np.zeros(len(c_E))
intphi_E = np.zeros(len(c_E))
#Setup arrays for lattice temperature
phi_L = np.zeros((len(t),len(x_plt_flat))); phi_L[0] = initphi_large_L
Flow_1L = np.zeros(len(c_L))
Flow_2L = np.zeros(len(c_L))
dphi_L = np.zeros(len(c_L))
intphi_L = np.zeros(len(c_L))
#General setup for E.E. loop
condi = np.array([np.arange(1,len(length)-1)])*(N-1) #Index to apply interface condition
cnfill = np.array([np.arange(1,len(length)-1)])*(plp-1)#correct interface condition with real value for phi
A00[0] = 1; A00[-1] = 1 #Avoid division by 0 in dphi_L! Clear for BC before intphi calc.
Abig_E = np.copy(Abig) #Since Abig can change due to interconditions we split it here
Abig_L = np.copy(Abig) #The interface conditions are applied on every time step
start_EL = time.time()
for i in tqdm(range(1,len(t)),position = 0):
#Calculate Solution at every time step and respective derivatives
phi0_E = np.dot(A00,c_E); phi1_E = np.dot(A1b,c_E); phi2_E = np.dot(A2b,c_E)
phi0_L = np.dot(A00,c_L); phi1_L = np.dot(A1b,c_L); phi2_L = np.dot(A2b,c_L)
#Calculating interface conditions which are applied later
intercon_E = self.interconditions(phi_E[i-1],interfaces,self.temp_data.conductivity,N,A1h)
intercon_L = self.interconditions(phi_L[i-1],interfaces,self.temp_data_Lat.conductivity,N,A1h)
startf = 0;endf = N-1
#Construct all piecewise flows and piecewise dphi. Iterate over layers
for j in range(0,interfaces+1):
#electron
Flow_1E[startf:endf] = self.temp_data.diff_conductivity(phi0_E[startf:endf],j)
Flow_2E[startf:endf] = self.temp_data.conductivity[j](phi0_E[startf:endf])
Flow_1E[startf:endf] *=phi1_E[startf:endf]**2
Flow_2E[startf:endf] *= phi2_E[startf:endf]
#lattice
Flow_1L[startf:endf] = self.temp_data_Lat.diff_conductivity(phi0_L[startf:endf],j)
Flow_2L[startf:endf] = self.temp_data_Lat.conductivity[j](phi0_L[startf:endf])
Flow_1L[startf:endf] *=phi1_L[startf:endf]**2
Flow_2L[startf:endf] *= phi2_L[startf:endf]
#calculate delta phi for electron and lattice
#This is the core of the problem
dphi_E[startf:endf] = 1/(self.temp_data.heatCapacity[j](phi0_E)[startf:endf]*self.temp_data.rho[j])*\
(Flow_1E[startf:endf]+Flow_2E[startf:endf]+sourceM[i,startf:endf] + self.coupling[j]*(phi0_L[startf:endf]-phi0_E[startf:endf]))
dphi_L[startf:endf] = 1/(self.temp_data_Lat.heatCapacity[j](phi0_L)[startf:endf]*self.temp_data_Lat.rho[j])*\
(Flow_1L[startf:endf]+Flow_2L[startf:endf] + self.coupling[j]*(phi0_E[startf:endf]-phi0_L[startf:endf]))
startf += N-1; endf +=N-1
#filling up Abig with the interface condition in the middle of the grid
start_i = 0; end_i = N-1
for k in range(0,interfaces): #Apply interface conditions for all layers in every time step
#for the electron system
Abig_E[end_i,start_i:end_i] = intercon_E[0][k][:-1]#Lhs interface flow
Abig_E[end_i,end_i+1:end_i+N] = -intercon_E[1][k][1:]#Rhs interface flow
Abig_E[end_i,end_i] = intercon_E[0][k][-1] -intercon_E[1][k][0]
#for the lattice system
Abig_L[end_i,start_i:end_i] = intercon_L[0][k][:-1]#Lhs interface flow
Abig_L[end_i,end_i+1:end_i+N] = -intercon_L[1][k][1:]#Rhs interface flow
Abig_L[end_i,end_i] = intercon_L[0][k][-1] -intercon_L[1][k][0]
start_i += N-1; end_i += N-1
#computing the flux for every time step for the boundaries
Flux_E = BC_E[:,i]
Flux_E[0] /= self.temp_data.conductivity[0](c_E[0])**self.temp_data.Left_BC_Type + 1e-12
Flux_E[-1] /= self.temp_data.conductivity[-1](c_E[-1])**self.temp_data.Right_BC_Type + 1e-12
Flux_L = BC_L[:,i]
Flux_L[0] /= self.temp_data_Lat.conductivity[0](c_L[0])**self.temp_data_Lat.Left_BC_Type + 1e-12
Flux_L[-1] /= self.temp_data_Lat.conductivity[-1](c_L[-1])**self.temp_data_Lat.Right_BC_Type + 1e-12
#Clear for boundary conditions at the edges of the grid
dphi_E[0] = 0; dphi_E[-1] = 0; dphi_L[0] = 0; dphi_L[-1] = 0
phi0_E[0] = 0; phi0_E[-1] = 0; phi0_L[0] = 0; phi0_L[-1] = 0;
#intermediate phi with low resolution in space according to explicit euler
intphi_E = phi0_E + tstep[i] * dphi_E + Flux_E
intphi_E[condi] = 0
intphi_L = phi0_L + tstep[i] * dphi_L + Flux_L
intphi_L[condi] = 0 #Interface condition: Q_1 -Q_2 = 0
#electron: use c to map on high resolution x-grid
#since in Abig, k(T(t)) is inserted we have to solve the system for every step
c_E = np.linalg.solve(Abig_E,intphi_E) # c(t) for every timestep
phi_E[i] = np.dot(Cb,c_E) # map spline coefficients to fine Cb grid
phi_E[i,cnfill] = c_E[condi] #correct the values for phi at interface
#lattice
c_L = np.linalg.solve(Abig_L,intphi_L)
phi_L[i] = np.dot(Cb,c_L)
phi_L[i,cnfill] = c_L[condi]
end_EL = time.time()
print('-----------------------------------------------------------')
print('Heat diffusion in a coupled electron-lattice system has been simulated')
print('Elapsed time in E.E.- loop:', end_EL-start_EL)
print('-----------------------------------------------------------')
T = []
T.append(phi_E); T.append(phi_L)
return(x_plt_flat,t,T)
#=============End 2Temp Case ========================
else: #this is the single temperature case. (Only electron temperature)
#prepare space to store phi solution on fine plt grid. And Flow_1,2 vectors
phi = np.zeros((len(t),len(x_plt_flat))); phi[0] = initphi_large
Flow_1 = np.zeros(len(c_E))
Flow_2 = np.zeros(len(c_E))
dphi = np.zeros(len(c_E))
intphi = np.zeros(len(c_E))
condi = np.array([np.arange(1,len(length)-1)])*(N-1) #Index to apply interface condition
cnfill = np.array([np.arange(1,len(length)-1)])*(plp-1) #correct interface condition with real value for phi
A00[0] = 1; A00[-1] = 1 #Avoid 1/0 division in dphi calculation. See E.E. loop
startE = time.time()
for i in tqdm(range(1,len(t)),position = 0):
phi0 = np.dot(A00,c_E); phi1 = np.dot(A1b,c_E); phi2 = np.dot(A2b,c_E)
intercon_E = self.interconditions(phi[i-1],interfaces,self.temp_data.conductivity,N,A1h) #get interface conditions for every time step
startf = 0;endf = N-1
#construct all piecewise flows and piecewise dphi
for j in range(0,interfaces+1):
Flow_1[startf:endf] = self.temp_data.diff_conductivity(phi0[startf:endf],j)
Flow_2[startf:endf] = self.temp_data.conductivity[j](phi0[startf:endf])
Flow_1[startf:endf] *=phi1[startf:endf]**2
Flow_2[startf:endf] *= phi2[startf:endf]
dphi[startf:endf] = 1/(self.temp_data.heatCapacity[j](phi0)[startf:endf]*self.temp_data.rho[j])*\
(Flow_1[startf:endf]+Flow_2[startf:endf]+sourceM[i,startf:endf])
startf += N-1; endf +=N-1
#filling up Abig with the interface condition in the middle of the grid
start_i = 0; end_i = N-1
for k in range(0,interfaces): #Apply interface conditions for all layers in every time step
Abig[end_i,start_i:end_i] = intercon_E[0][k][:-1]#Lhs interface flow
Abig[end_i,end_i+1:end_i+N] = -intercon_E[1][k][1:]#Rhs interface flow
Abig[end_i,end_i] = intercon_E[0][k][-1] -intercon_E[1][k][0]
start_i += N-1; end_i += N-1
#computing the flux for every time step for the boundaries
Flux_E = BC_E[:,i]
Flux_E[0] /= self.temp_data.conductivity[0](c_E[0])**self.temp_data.Left_BC_Type + 1e-12
Flux_E[-1] /= self.temp_data.conductivity[-1](c_E[-1])**self.temp_data.Right_BC_Type + 1e-12
#Make space for BC
dphi[0] = 0; dphi[-1] = 0
phi0[0] = 0; phi0[-1] = 0
intphi = phi0 + tstep[i] * dphi + Flux_E
intphi[condi] = 0 #Interface condition: Q_1 -Q_2 = 0
# c(t) for every timestep
#since in Abig k(T(t)) is inserted we have to solve the system for every step
c_E = np.linalg.solve(Abig,intphi) #this system has to be solved in every time step
phi[i] = np.dot(Cb,c_E) # map spline coefficients to fine Cb grid
phi[i,cnfill] = c_E[condi] #correct the values for phi at interface
endE = time.time()
print('-----------------------------------------------------------')
print('Electron temperature heat diffusion has been simulated.')
print('Elapsed time in E.E.- loop:', endE-startE)
print('-----------------------------------------------------------')
return(x_plt_flat,t,phi)
def stability(self):
"""
If only the electron temperature system is under consideration, we only
compute the eigenvalues lambda_i of k/(C*rho)*A00^-1*A2b. That is,
we take the minimum eigenvalue of each layer to represent its time constant.
The maximum stable time step for E.E. is then given by -2/min(lambda_i), which is
the stability criterion the explicit Euler loop has to satisfy to obtain convergence.
"""
[c,A00,Abig,A1b,A2b,Cb,length,N,plp,xflat,x_plt_flat,initphi_large,interfaces,LayerMat,A1h] = self.temp_data.Msetup()
A00[0,0] = 1; A00[-1,-1] = 1
rho_E = self.temp_data.rho
conductivity_E = self.temp_data.conductivity
conductivity_E = np.asarray(conductivity_E)
typecheck = np.array([1])[0]
for i in range(0,len(conductivity_E)):
#In case conductivity is a function k(T) we compute a worst case scenario
#this is because we can only compare integers.
if not isinstance(conductivity_E[i],(int,float,type(typecheck))):
testT = np.linspace(self.stability_lim[0],self.stability_lim[1],50)
conductivity_E[i] = max(conductivity_E[i](testT))
heatCapacity_E = self.temp_data.heatCapacity
heatCapacity_E = np.asarray(heatCapacity_E)
for i in range(0,len(heatCapacity_E)):
#In case heatCapacity is a function C(T) we compute a worst case scenario
#and take an integer value to compare
if not isinstance(heatCapacity_E[i],(int,float,type(typecheck))):
testT = np.linspace(self.stability_lim[0],self.stability_lim[1],50)
heatCapacity_E[i] = min(heatCapacity_E[i](testT))
Eval = np.zeros(interfaces+1) #for each layer there will be an eigenvalue
koeff1 = conductivity_E/(heatCapacity_E*rho_E)
for i in range(0,interfaces+1):
Lambda = koeff1[i]*LayerMat[i]
Eval[i] = min(np.real(np.linalg.eig(Lambda)[0]))
tkonst_E = -1.9/Eval
if self.num_of_temp == 2:
"""
In the multi-temperature case, we also consider the lattice dynamics,
with the respective k_L/(C_L*rho_L) coefficients. In addition, we have to
consider the coupling between the two systems, i.e. G_mat,
with coefficients koeff_2 = G/(heatCapacity*rho).
Therefore we compute the eigenvalues of the combined system:
lambda_i = eig(Lambda + G_mat) for each layer.
The maximum stable time step is again -2/min(lambda_i).
"""
if self.temp_data.collocpts != self.temp_data_Lat.collocpts:
self.temp_data_Lat.collocpts = self.temp_data.collocpts
print('-----------------------------------------------------------')
print('The number of collocation points in the electron system \n'\
'is not the same as in the lattice system.\n'\
'They are set equal to avoid matrix dimension mismatch.')
print('-----------------------------------------------------------')
[c,A00_L,Abig,A1b,A2b_L,Cb,length,N,plp,xflat,x_plt_flat,initphi_large,interfaces,LayerMat,A1h] = self.temp_data_Lat.Msetup()
A00_L[0,0] = 1; A00_L[-1,-1] = 1
rho_L = self.temp_data_Lat.rho
G = self.coupling
G = np.asarray(G)
conductivity_L = self.temp_data_Lat.conductivity
conductivity_L = np.asarray(conductivity_L)
#In case conductivity is a function k(T) we compute a worst case scenario
for i in range(0,len(conductivity_L)):
if not isinstance(conductivity_L[i],(int ,float,type(typecheck))):
testT = np.linspace(self.stability_lim[0],self.stability_lim[1],50)
conductivity_L[i] = max(conductivity_L[i](testT))
heatCapacity_L = self.temp_data_Lat.heatCapacity
heatCapacity_L = np.asarray(heatCapacity_L)
#In case heatCapacity is a function C(T) we compute a worst case scenario
for i in range(0,len(heatCapacity_L)):
if not isinstance(heatCapacity_L[i],(int,float,type(typecheck))):
testT = np.linspace(self.stability_lim[0],self.stability_lim[1],50)
heatCapacity_L[i] = min(heatCapacity_L[i](testT))
#M: for every layer we load the respective matrix from the temperature class
M = np.shape(LayerMat)[1]
Lambda = np.zeros((2*M,2*M))
Eval = np.zeros(interfaces+1)
G_mat = np.zeros((2*M,2*M))
koeff1_E = conductivity_E/(heatCapacity_E*rho_E)
koeff1_L = conductivity_L/(heatCapacity_L*rho_L)
koeff2_E = G/(heatCapacity_E*rho_E)
koeff2_L = G/(heatCapacity_L*rho_L)
for i in range(0,interfaces+1):
Lambda[0:M,0:M] = koeff1_E[i]*LayerMat[i]
Lambda[M:,M:] = koeff1_L[i]*LayerMat[i]
G_mat[0:M,0:M] = -koeff2_E[i]*np.eye(M)
G_mat[0:M,M:] = koeff2_E[i]*np.eye(M)
G_mat[M:,0:M] = koeff2_L[i]*np.eye(M)
G_mat[M:,M:] = -koeff2_L[i]*np.eye(M)
Eval[i] = min(np.real(np.linalg.eig(Lambda+G_mat)[0]))
tkonst = -1.9/Eval
return(min(tkonst))
if self.num_of_temp == 3:
"""
Consider the case of a three temperature model and follow the same
procedure as in the two TM case, except that now all the coupling
constants are taken into consideration!
"""
if self.temp_data.collocpts != self.temp_data_Lat.collocpts:
self.temp_data_Lat.collocpts = self.temp_data.collocpts
print('-----------------------------------------------------------')
print('The number of collocation points in the electron system \n'\
'is not the same as in the lattice system.\n'\
'They are set equal to avoid matrix dimension mismatch.')
print('-----------------------------------------------------------')
if self.temp_data.collocpts != self.temp_data_Spin.collocpts:
self.temp_data_Spin.collocpts = self.temp_data.collocpts
print('-----------------------------------------------------------')
print('The number of collocation points in the electron system \n'\
'is not the same as in the spin system.\n'\
'They are set equal to avoid matrix dimension mismatch.')
print('-----------------------------------------------------------')
[c,A00_L,Abig,A1b,A2b_L,Cb,length,N,plp,xflat,x_plt_flat,initphi_large,interfaces,LayerMat,A1h] = self.temp_data_Lat.Msetup()
A00_L[0,0] = 1; A00_L[-1,-1] = 1
rho = self.temp_data_Lat.rho
#Load different coupling constants and make them arrays
G_EL = self.coupling; G_EL = np.asarray(G_EL)
G_LS = self.coupling_LS; G_LS = np.asarray(G_LS)
G_SE = self.coupling_SE; G_SE = np.asarray(G_SE)
conductivity_L = self.temp_data_Lat.conductivity
conductivity_L = np.asarray(conductivity_L)
conductivity_S = self.temp_data_Spin.conductivity
conductivity_S = np.asarray(conductivity_S)
heatCapacity_L = self.temp_data_Lat.heatCapacity
heatCapacity_L = np.asarray(heatCapacity_L)
heatCapacity_S = self.temp_data_Spin.heatCapacity
heatCapacity_S = np.asarray(heatCapacity_S)
#In case heatCapacity is a function C(T) we compute a worst case scenario
#That is we reduce the problem into a constant coefficient case
for i in range(0,len(conductivity_L)):
if not isinstance(conductivity_L[i],(int,float,type(typecheck))):
testT = np.linspace(self.stability_lim[0],self.stability_lim[1],50)
conductivity_L[i] = max(conductivity_L[i](testT))
for i in range(0,len(conductivity_S)):
if not isinstance(conductivity_S[i],(int,float,type(typecheck))):
testT = np.linspace(self.stability_lim[0],self.stability_lim[1],50)
conductivity_S[i] = max(conductivity_S[i](testT))
for i in range(0,len(heatCapacity_L)):
if not isinstance(heatCapacity_L[i],(int,float,type(typecheck))):
testT = np.linspace(self.stability_lim[0],self.stability_lim[1],50)
heatCapacity_L[i] = min(heatCapacity_L[i](testT))
for i in range(0,len(heatCapacity_S)):
if not isinstance(heatCapacity_S[i],(int,float,type(typecheck))):
testT = np.linspace(self.stability_lim[0],self.stability_lim[1],50)
heatCapacity_S[i] = min(heatCapacity_S[i](testT))
#construct Matrices for the Kronecker product
K11 = np.array([[1,0,0],[0,0,0],[0,0,0]])
K12 = np.array([[0,1,0],[0,0,0],[0,0,0]])
K13 = np.array([[0,0,1],[0,0,0],[0,0,0]])
K21 = np.array([[0,0,0],[1,0,0],[0,0,0]])
K22 = np.array([[0,0,0],[0,1,0],[0,0,0]])
K23 = np.array([[0,0,0],[0,0,1],[0,0,0]])
K31 = np.array([[0,0,0],[0,0,0],[1,0,0]])
K32 = np.array([[0,0,0],[0,0,0],[0,1,0]])
K33 = np.array([[0,0,0],[0,0,0],[0,0,1]])
#Identity matrix for the Kronecker product on the RHS and place to store Eval
unity = np.eye(np.shape(LayerMat)[1])
Eval = np.zeros(interfaces+1)
#Compute the minimum eigenvalue for every layer
for i in range(0,interfaces+1):
coeff_E = conductivity_E[i]/(heatCapacity_E[i]*rho[i])
coeff_L = conductivity_L[i]/(heatCapacity_L[i]*rho[i])
coeff_S = conductivity_S[i]/(heatCapacity_S[i]*rho[i])
Lambda = np.kron(K11,coeff_E*LayerMat[i])
Lambda += np.kron(K22,coeff_L*LayerMat[i])
Lambda += np.kron(K33,coeff_S*LayerMat[i])
G_mat = np.kron(K11,-unity*coeff_E*(G_EL[i]+G_SE[i]))
G_mat += np.kron(K12,unity*coeff_E*G_EL[i])
G_mat += np.kron(K13,unity*coeff_E*G_SE[i])
G_mat += np.kron(K21,unity*coeff_L*G_EL[i])
G_mat += np.kron(K22,-unity*coeff_L*(G_EL[i]+G_LS[i]))
G_mat += np.kron(K23,unity*coeff_L*G_LS[i])
G_mat += np.kron(K31,unity*coeff_S*G_SE[i])
G_mat += np.kron(K32,unity*coeff_S*G_LS[i])
G_mat += np.kron(K33,-unity*coeff_S*(G_SE[i]+G_LS[i]))
#Compute the minimum eigenvalue for each layer
Eval[i] = min(np.real(np.linalg.eig(Lambda+G_mat)[0]))
tkonst = -1.9/Eval
#The global time constant will be guided by the fastest dynamics
#of all the layers!
return(min(tkonst))
else:
#if there is only electron temperature, only those dynamics will be
#considered, when time step for the E.E. loop is calculated.
return(min(tkonst_E))
class source(object):
def __init__(self):
self.spaceprofile = 'TMM'
self.timeprofile = 'Gaussian'
self.fluence = 0
self.t0 = 0
self.FWHM = False
self.loadData = False
self.multipulse = False
self.frequency = False
self.num_of_pulses = False
self.adjusted_grid = False
self.dt0 = False
self.extra_points = 200
self.theta_in = 0# 0 is perpendicular to surface/ pi/2 is grazing
self.lambda_vac = False
self.polarization = 'p'
def getProperties(self): # to depict the properties of the object
for i in (self.__dict__):
name = str(i)
value = str(self.__dict__[i])
print('{:<20}{:>10}'.format( name,value ))
def __repr__(self):
return('Source')
def ref2delta(self,refindex,lambdavac):
"""
Use the refractive index and compute the optical penetration depth
This is used for Lambert-Beer's absorption law.
"""
lambdavac_m = lambdavac*1e-9
#crop away the two layers of air and only consider the target film layers
refindex = refindex[1:-1]
#If there is no imaginary part we avoid dividing over 0
for i in range(0,len(refindex)):
if np.imag(refindex[i]) == 0:
refindex[i] = refindex[i] + 1e-9j
deltap = (4*np.pi/lambdavac_m*np.imag(refindex))**(-1)
return(deltap)
def Gaussian(self,xmg,tmg,lam,A,sigma2,x0,customtime = None):
if not (self.fluence or self.FWHM):
print('------------------------------------------------------------')
print('Create a pulse with defining pulse properties. \n ' +\
'.fluence, .optical_penetration_depth, .FWHM')
print('------------------------------------------------------------')
if customtime is None:
#Create a source with respect to each lam of every layer. Use the init_G_source function
Gauss = A*np.exp(-(tmg-self.t0)**2/(2*sigma2)) #Gaussian in time
else:
Gauss = A*customtime#custom in time
Gauss *= lam*np.exp(-lam*(xmg-x0))#space profile: LB decay
return(Gauss)
def init_G_source(self,xflat,x0,t,opt_pen,N,func,customtime = None,scaling = 0):
"""
First an empty array 'sourceM' is created.
Then we iterate over the different layers and call
func --> Gaussian.
This will create a 2D (time, space) Gaussian source grid
with different lam[i].
For each layer, the problem is reset, i.e. we have a new
Amplitude, new scope of the x-grid, new lambda. Only sigma stays the same.
"""
lam = 1/opt_pen
#create space for solution of source in matrix form
xmg, tmg = np.meshgrid(xflat,t)
sourceM = np.zeros(np.shape(xmg))
if customtime is None:
#Convert the input, fluence & FWHM given in 'source' class to Amplitude and sigma
sigma2 = self.FWHM**2/(2*np.log(2))
A = self.fluence/np.sqrt(2*np.pi*sigma2)
#loop over all layers and change lambda, Amplitude and scope of the x-grid
startL = 0; endL = N-1
for i in range(2,len(opt_pen)+2):
sourceM[:,startL:endL] = func(xmg[:,startL:endL],tmg[:,startL:endL],lam[i-2],A,sigma2,x0[i-2])
#at the end of each loop: the intensity of the end of previous layer
#will be the starting intensity of the next layer
A = A*np.exp(-(x0[i-1]-x0[i-2])*lam[i-2])
startL = endL; endL = i*N-i+1
else:#In the case of LB in space and custom in time
if (self.timeprofile== "RepGaussian") or (self.timeprofile=="repGaussian"):
sigma2 = self.FWHM**2/(2*np.log(2))
A = self.fluence/np.sqrt(2*np.pi*sigma2)
startL = 0; endL = N-1
for i in range(2,len(opt_pen)+2):
sourceM[:,startL:endL] = func(xmg[:,startL:endL],tmg[:,startL:endL],lam[i-2],A,sigma2,x0[i-2],customtime[:,startL:endL])
#at the end of each loop: the intensity of the end of previous layer
#will be the starting intensity of the next layer
A = A*np.exp(-(x0[i-1]-x0[i-2])*lam[i-2])
startL = endL; endL = i*N-i+1
if (self.timeprofile== "custom") or (self.timeprofile == "Custom"):
A = scaling#calculated in the custom() function
sigma2 = 0#It is not needed
startL = 0; endL = N-1
for i in range(2,len(opt_pen)+2):
sourceM[:,startL:endL] = func(xmg[:,startL:endL],tmg[:,startL:endL],lam[i-2],A,sigma2,x0[i-2],customtime[:,startL:endL])
#at the end of each loop: the intensity of the end of previous layer
#will be the starting intensity of the next layer
A = A*np.exp(-(x0[i-1]-x0[i-2])*lam[i-2])
startL = endL; endL = i*N-i+1
return(sourceM)
#mytime is the timegrid of the simulation
#ttime, amplitude are the time grid and amplitude of the input data collected from the lab
def custom(self,mytime,myspace,ttime,amplitude,opt_pen):
lam = 1/opt_pen
#Mapping the obtained data to the simulation time grid via interpolation
ampl1D = np.interp(mytime,ttime,amplitude**2)
#Compute the right amplitude using the area under the curve
integr = np.trapz(ampl1D,mytime,np.diff(mytime))
#scaling factor to get the amplitude right
scaling = self.fluence/integr
xmg,tmg = np.meshgrid(myspace,mytime)
ampltime= np.interp(tmg,ttime,amplitude**2)
#ampltime *= scaling
ampl2D = ampltime*lam*np.exp(-lam*xmg)
return(ampl2D,ampltime,scaling)
def fresnel(self,theta_in,n_in,n_out,pol):
n_in = complex(n_in); n_out = complex(n_out)
theta_out = np.arcsin(n_in*np.sin(theta_in)/n_out)
if pol == 's':
rs = (n_in*np.cos(theta_in) - n_out*np.cos(theta_out))/\
(n_in*np.cos(theta_in) + n_out*np.cos(theta_out))
ts = 2*n_in*np.cos(theta_in)/(n_in*np.cos(theta_in)+n_out*np.cos(theta_out))
return(theta_out,rs,ts)
if pol == 'p':
rp = (n_out*np.cos(theta_in)-n_in*np.cos(theta_out))/\
(n_out*np.cos(theta_in)+n_in*np.cos(theta_out))
tp = 2* n_in*np.cos(theta_in)/(n_out*np.cos(theta_in)+n_in*np.cos(theta_out))
return(theta_out,rp,tp)
def TM(self,theta_in,lambda0,n_vec,d_vec,pol):
#create complex arrays for variables
theta = np.zeros(len(n_vec), dtype = complex); theta[0] = theta_in
phi = np.zeros(len(n_vec),dtype = complex)
rn = np.zeros(len(n_vec)-1, dtype = complex)
tn = np.zeros_like(rn,dtype = complex)
M_n = np.zeros((len(n_vec),2,2), dtype = complex)
M = np.eye(2,dtype = complex)
for i in range(len(n_vec)-1): # to obtain all angles/rn/tn for each layer
[theta[i+1],rn[i],tn[i]] = self.fresnel(theta[i],n_vec[i],n_vec[i+1],pol)
#M = M0*M1*M2*M3*....
for k in range(1,len(n_vec)-1):#loop over all interfaces except 1st
phi[k] = 2*np.pi*n_vec[k]*np.cos(theta[k])*d_vec[k]/lambda0
Tn = np.array([[np.exp(-1j*phi[k]),0],[0,np.exp(1j*phi[k])]],dtype = complex)/tn[k]
Pn = np.array([[1,rn[k]],[rn[k],1]],dtype = complex)
M_n[k] = np.dot(Tn,Pn)
M = np.dot(M,M_n[k])
#compute for the first interface:
trans0 = np.array([[1,rn[0]],[rn[0],1]],dtype= complex)/tn[0]
M = np.dot(trans0,M)
#Complex transmission/reflection amplitude
t = 1/M[0,0]
r = M[1,0]/M[0,0]
#Fraction of power transmitted
if pol == 's': #s-polarized
T = np.abs(t)**2*np.real(n_vec[-1]*np.cos(theta[-1]))/\
np.real(n_vec[0]*np.cos(theta[0]))
elif pol == 'p': #p-polarized
T = np.abs(t)**2*np.real(n_vec[-1]*np.cos(np.conj(theta[-1])))/\
np.real(n_vec[0]*np.cos(np.conj(theta[0])))
#Fraction of power reflected
R = np.abs(r)**2
A = 1.-T-R
return(M,M_n,t,r,T,R,A,theta)
def layerAmpl(self,theta_in,lambda0,n_vec,d_vec,pol):
"""
After r & t have been calculated and all the respective matrices M_n
for each layer are known, we can go 'backwards', i.e. from the last to the
first layer, and compute all the amplitudes for the forward v_n and
backward w_n traveling wave. -> [v_n,w_n].T = M_n @ [v_{n+1},w_{n+1}].T
"""
[M,M_n,t,r,T,R,A,theta] = self.TM(theta_in,lambda0,n_vec,d_vec,pol)
vw_list = np.zeros((len(n_vec),2),dtype = complex)
vw =np.array([[t],[0]])
vw_list[-1,:] = vw.T
for i in range(len(n_vec)-2,0,-1):
vw = np.dot(M_n[i],vw)
vw_list[i,:] = vw.T
return(vw_list,theta)
def absorption(self,theta_in,lambda0,n_vec,d_vec,pol,points):
#reload the forward and backward wave coefficients for every layer
[vw_n,theta] = self.layerAmpl(theta_in,lambda0,n_vec,d_vec,pol)
total_len = np.sum(d_vec[1:-1])
pointcount = 0
#a is an array where the normalized absorption for the entire grid is stored
a = []
for i in range(1,len(n_vec)-1):
kz = 2*np.pi*n_vec[i]*np.cos(theta[i])
#!/usr/bin/env python
# coding: utf-8
# In[7]:
import pandas as pd
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
import time
import random
from itertools import chain
# Digit data loading
#
# In[8]:
training_data = pd.read_csv("C:\\Users\\<NAME>\\Desktop\\Project\\digitdata\\trainingimages",skip_blank_lines=False, header=None,squeeze = True)
training_labels = pd.read_csv("C:\\Users\\<NAME>\\Desktop\\Project\\digitdata\\traininglabels",skip_blank_lines=False, header=None)
test_images = pd.read_csv("C:\\Users\\<NAME>\\Desktop\\Project\\digitdata\\testimages",skip_blank_lines=False, header=None,squeeze = True)
test_labels = pd.read_csv("C:\\Users\\<NAME>\\Desktop\\Project\\digitdata\\testlabels",skip_blank_lines=False, header=None)
# In[9]:
print(training_data[0:28], test_images[0:28])
# In[ ]:
# Each image in the digit data is described as a 28*28 pixel image, which can be mapped to a binary 0/1 grid.
# In[10]:
def transform_data(data):
data_temp = data.copy()
for i in range(data_temp.shape[0]):
data_temp[i] = data_temp[i].replace(' ', '0').replace('#', '1').replace('+', '1')
data_temp = data_temp.apply(lambda x: pd.Series(list(x)))
data_temp = data_temp.apply(pd.to_numeric)
return data_temp
train_digit_data= transform_data(training_data)
test_digit_data=transform_data(test_images)
# In[12]:
# Select 4x4 pixel blocks and use the number of colored pixels (1s) in each block as features
def colorpixelcount(df, image_height, feature_size):
count_list=[]
for i in range(int(image_height/feature_size)):
for j in range(int(df.shape[1]/feature_size)):
count=0
for k in range((i*feature_size),(i*feature_size)+feature_size):
for l in range((j*feature_size),(j*feature_size)+feature_size):
if df.iloc[k,l]==1:
count=count+1
count_list.append(count)
return count_list
def perceptron_features(data_temp, image_height, feature_size):
feat_list=[]
re = 0
while re<int(len(data_temp)/image_height):
temp=colorpixelcount(data_temp.iloc[(image_height*re):(image_height*(re+1)),:].reset_index(drop=True), image_height, feature_size)
feat_list.append(temp)
re=re+1
return feat_list
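# Added note (illustrative, not in the original notebook): with 28x28 digit images and
# feature_size = 4 this yields (28/4)*(28/4) = 49 block-count features per image; for the
# face data (assumed to be 70x60 pixels here) feature_size = 2 would give 35*30 = 1050.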
# In[13]:
def perceptron_train(data_for_train, classes, iterations, labels,image_height, feature_size):
start_time = time.time()
perc_features = np.asarray(perceptron_features(data_for_train, image_height, feature_size))
weights = np.ones((classes,perc_features.shape[1]))
trained_vector = np.zeros((classes,1))
for _ in range(iterations):
for image_no in range(len(labels)):
temp_output = np.dot(perc_features[image_no], weights.T)
# get the class with the highest predicted score
class_temp = np.where(max(temp_output) == temp_output)[0][0]
if class_temp != labels[image_no]:
weights[class_temp] -= perc_features[image_no]
weights[labels[image_no]] += perc_features[image_no]
end_time = time.time()
return weights, end_time-start_time
# In[14]:
def perceptron_test(test_digit_data,weights, test_labels, image_height, feature_size):
perc_test_features = np.asarray(perceptron_features(test_digit_data,image_height, feature_size))
labels = np.asarray(test_labels)
misclassified = 0
for image_no in range(len(test_labels)):
predict_array = np.dot(perc_test_features[image_no],weights.T)
class_temp = np.where(max(predict_array) == predict_array)[0][0]
if class_temp != labels[image_no][0]:
misclassified += 1
return (1 - misclassified/len(labels))*100
# accuracy = 1 - perceptron_test(weights,test_labels)
# accuracy
# In[176]:
def perceptron_digit(classes,image_height, feature_size):
ratio = np.arange(0.1,1.1,0.1)
accuracies = []
std_accuracies = []
times = []
for value in tqdm(ratio):
times_inner = []
accuracies_inner =[]
for _ in range(4):
num_samples = training_labels.shape[0]
data_random_samples = random.sample(range(num_samples), int(value * num_samples))
data_sample_range = [ range(i * image_height, (i+1)*image_height) for i in data_random_samples]
data_sample_range = list(chain(*data_sample_range))
data_for_train = train_digit_data.iloc[data_sample_range]
labels_for_train = (np.asarray(training_labels)[data_random_samples])
weights, time = perceptron_train(data_for_train,classes,3,labels_for_train, image_height, feature_size)
times_inner.append(time)
accuracies_inner.append(perceptron_test(test_digit_data,weights, test_labels, image_height, feature_size))
accuracies.append(round(np.mean(accuracies_inner),3))
std_accuracies.append(round(np.std(accuracies_inner),3))
times.append(round(np.mean(times_inner),3))
training_split = np.array([ 10 * i for i in range(1,11,1)])
final_results = pd.DataFrame(list(zip(training_split,accuracies, std_accuracies, times)),
columns = ['Training_split','Mean(Accuracy) (%)', 'Std(Accuracy) (%)','Training Time (sec)'])
plt.plot(ratio,accuracies )
plt.title("Training split vs Accuracy ")
plt.xlabel("Training Split")
plt.ylabel("Accuracy")
plt.show()
plt.plot(ratio,times )
plt.title("Training time vs Training split")
plt.xlabel("Training split")
plt.ylabel("Time for training")
plt.show()
return final_results
perceptron_digit(10,28,4)
# Face classification for Perceptron
# In[15]:
training_face_data = pd.read_csv("C:\\Users\\<NAME>\\Desktop\\Project\\facedata\\facedatatrain",skip_blank_lines=False, header=None,squeeze = True)
training_face_labels = pd.read_csv("C:\\Users\\<NAME>\\Desktop\\Project\\facedata\\facedatatrainlabels",skip_blank_lines=False, header=None)
test_face_images = pd.read_csv("C:\\Users\\<NAME>\\Desktop\\Project\\facedata\\facedatatest",skip_blank_lines=False, header=None,squeeze = True)
test_face_labels = pd.read_csv("C:\\Users\\<NAME>\\Desktop\\Project\\facedata\\facedatatestlabels",skip_blank_lines=False, header=None)
# validation_face_images = pd.read_csv("C:\\Users\\<NAME>\\Desktop\\Project\\facedata\\facedatavalidation",skip_blank_lines=False, header=None,squeeze = True)
# validation_face_labels = pd.read_csv("C:\\Users\\<NAME>\\Desktop\\Project\\facedata\\facedatavalidationlabels",skip_blank_lines=False, header=None)
face_data_train = transform_data(training_face_data)
face_data_test = transform_data(test_face_images)
# In[16]:
print(face_data_train.shape)
print(training_face_labels.shape)
# In[17]:
def perceptron_face(classes,image_size, feature_size):
accuracies = []
std_accuracies = []
times = []
ratio = np.arange(0.1,1.05,0.1)
for value in tqdm(ratio):
times_inner = []
accuracies_inner =[]
for _ in range(4):
num_samples = training_face_labels.shape[0]
data_random_samples = random.sample(range(num_samples), int(value * num_samples))
data_sample_range = [ range(i * image_size, (i+1)*image_size) for i in data_random_samples]
data_sample_range = list(chain(*data_sample_range))
data_face_train = face_data_train.iloc[data_sample_range]
labels_for_train = (np.asarray(training_face_labels)[data_random_samples])
weights, time = perceptron_train(data_face_train,classes,1,labels_for_train, image_size, feature_size)
times_inner.append(time)
accuracies_inner.append(perceptron_test(face_data_test,weights, test_face_labels, image_size, feature_size))
accuracies.append(round(np.mean(accuracies_inner),3))
std_accuracies.append(round(np.std(np.array(accuracies_inner)),3))
times.append(round(np.mean(times_inner),3))
training_split = np.array([ 10 *i for i in range(1,11,1)])
final_results = pd.DataFrame(list(zip(training_split, accuracies, std_accuracies, times)),
columns = ['Training_ratio','Mean(Accuracy) %', 'Std(Accuracy) %', 'Training Time (sec)'])
plt.plot(ratio,accuracies )
plt.title("Training split vs Accuracy ")
plt.xlabel("Training Split")
plt.ylabel("Accuracy")
plt.show()
plt.plot(ratio,times )
plt.title("Training time vs Training split")
plt.xlabel("Training split")
plt.ylabel("Time for training")
plt.show()
return final_results
#
perceptron_face(2,70,2)
# In[ ]:
# In[18]:
training_data = pd.read_csv("C:\\Users\\<NAME>\\Desktop\\Project\\digitdata\\trainingimages",skip_blank_lines=False, header=None,squeeze = True)
training_labels = pd.read_csv("C:\\Users\\<NAME>\\Desktop\\Project\\digitdata\\traininglabels",skip_blank_lines=False, header=None)
test_images = pd.read_csv("C:\\Users\\<NAME>\\Desktop\\Project\\digitdata\\testimages",skip_blank_lines=False, header=None,squeeze = True)
test_labels = pd.read_csv("C:\\Users\\<NAME>\\Desktop\\Project\\digitdata\\testlabels",skip_blank_lines=False, header=None)
# validation_images = pd.read_csv("C:\\Users\\<NAME>\\Desktop\\Project\\digitdata\\validationimages",skip_blank_lines=False, header=None,squeeze = True)
# validation_labels = pd.read_csv("C:\\Users\\<NAME>\\Desktop\\Project\\digitdata\\validationlabels",skip_blank_lines=False, header=None)
# In[19]:
train_digit_data= transform_data(training_data)
test_digit_data=transform_data(test_images)
# In[20]:
train_digit_data.shape
# We are not extracting features separately here, which means each pixel is treated as an individual feature.
#
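# Added note (illustrative, not in the original notebook): each 28x28 image is flattened
# to a 784-dimensional vector, every test vector is compared to every sampled training
# vector by squared Euclidean distance, and the label is a majority vote over the k
# nearest neighbours.
#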
# In[21]:
def knn_digit(train_data,k,image_height):
train_shape = train_data.shape
test_shape = test_digit_data.shape
num_images = training_labels.shape[0]
testing_num_images = test_labels.shape[0]
# convert the test data into a numpy array and flatten it for calculations
test_dig_np = np.zeros(shape = (testing_num_images, test_shape[1]*image_height))
for i in range(testing_num_images):
test_dig_np[i] = test_digit_data.iloc[i*image_height:(i+1)*image_height,:].to_numpy().flatten()
accuracy = []
accuracy_std = []
time_list = []
ratio = np.arange(0.1,1.05,0.1)
# flatten the training dataset
for split in tqdm(ratio):
accuracy_inner = []
time_inner = []
for _ in range(2):
train_dig_np = np.zeros(shape=(int((train_shape[0]/image_height)*split),train_shape[1]*image_height))
start_time = time.time()
num_samples = training_labels.shape[0]
data_random_samples = random.sample(range(num_samples), int(split * num_samples))
data_sample_range = [ range(i * image_height, (i+1)*image_height) for i in data_random_samples]
data_sample_range = list(chain(*data_sample_range))
train_temp_data = train_data.iloc[data_sample_range,:]
for i in range(int(num_images*split)):
train_dig_np[i] = train_temp_data[i*image_height:(i+1)*image_height].to_numpy().flatten()
error = 0
for image_no in range(testing_num_images):
each_image = test_digit_data[image_no*image_height:(image_no+1)*image_height]
each_image = each_image.to_numpy().flatten()
neighbours_vector = np.zeros(int(num_images*split))
# compare each test image vector with every image in the sampled training dataset
for j in range(int(num_images*split)):
neighbours_vector[j] = ((each_image - train_dig_np[j])**2).sum()
neighbour_df = pd.DataFrame(neighbours_vector)
neighbour_df = neighbour_df[0].sort_values()[0:k]
class_labels = []
for class_index in neighbour_df.index:
class_labels.append(training_labels.iloc[data_random_samples[class_index],0])
predicted_class = pd.DataFrame(class_labels)
predicted_value = predicted_class[0].value_counts().idxmax()
# if the predicted class does not match the true label, count it as an error
if predicted_value != test_labels.iloc[image_no][0]:
error += 1
# print(predicted_value, test_labels.iloc[image_no][0])
# print(error, testing_num_images)
end_time = time.time()
accuracy_inner.append(1 - error/testing_num_images)
time_inner.append(end_time - start_time)
accuracy.append(np.mean(accuracy_inner))
# Differentiable plasticity: Omniglot task.
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# You MUST download the Python version of the Omniglot dataset
# (https://github.com/brendenlake/omniglot), and move the 'omniglot-master'
# directory inside this directory.
# To get the results shown in the paper:
# python3 omniglot.py --nbclasses 5 --nbiter 5000000 --rule oja --activ tanh --stepsizelr 1000000 --prestime 1 --gamma .666 --alpha free --lr 3e-5
# Alternative (using a shared, though still learned alpha across all connections):
# python3 omniglot.py --nbclasses 5 --nbiter 5000000 --activ tanh --stepsizelr 1000000 --prestime 1 --gamma 0.3 --lr 1e-4 --alpha yoked
# Note that this code uses click rather than argparse for command-line
# parameter handling. I won't do that again.
import pdb
import torch
import torch.nn as nn
from torch.autograd import Variable
import click
import numpy as np
from numpy import random
import torch.nn.functional as F
from torch import optim
from torch.optim import lr_scheduler
import random
import sys
import pickle
import pdb
import time
import skimage
from skimage import transform
from skimage import io
import os
import platform
# Uber-only
#import OpusHdfsCopy
#from OpusHdfsCopy import transferFileToHdfsDir, checkHdfs
import numpy as np
import glob
np.set_printoptions(precision=4)
defaultParams = {
'activ': 'tanh', # 'tanh' or 'selu'
#'plastsize': 200,
'rule': 'hebb', # 'hebb' or 'oja'
'alpha': 'free', # 'free' of 'yoked' (if the latter, alpha is a single scalar learned parameter, shared across all connection)
'stepsizelr': 1e6, # How often should we change the learning rate?
'nbclasses': 5,
'gamma': .666, # The annealing factor of learning rate decay for Adam
'flare': 0, # Whether or not the ConvNet has more features in higher channels
'nbshots': 1, # Number of 'shots' in the few-shots learning
'prestime': 1,
'nbfeatures' : 64, # 128 is better (unsurprisingly) but we keep 64 for fair comparison with other reports
'prestimetest': 1,
'ipd': 0, # Inter-presentation delay
'imgsize': 31,
'nbiter': 5000000,
'lr': 3e-5,
'test_every': 500,
'save_every': 5000,
'rngseed':0
}
NBTESTCLASSES = 100
#ttype = torch.FloatTensor;
ttype = torch.cuda.FloatTensor;
# Generate the full list of inputs, labels, and the target label for an episode
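# Added note (illustrative, not in the original): an episode shows nbshots passes over the
# nbclasses selected characters (each presented for prestime steps with its label, separated
# by ipd blank steps), followed by an unlabeled test presentation of prestimetest steps,
# i.e. nbsteps = nbshots*(prestime+ipd)*nbclasses + prestimetest.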
def generateInputsLabelsAndTarget(params, imagedata, test=False):
#print(("Input Boost:", params['inputboost']))
#params['nbsteps'] = params['nbshots'] * ((params['prestime'] + params['ipd']) * params['nbclasses']) + params['prestimetest']
inputT = np.zeros((params['nbsteps'], 1, 1, params['imgsize'], params['imgsize'])) #inputTensor, initially in numpy format... Note dimensions: number of steps x batchsize (always 1) x NbChannels (also 1) x h x w
labelT = np.zeros((params['nbsteps'], 1, params['nbclasses'])) #labelTensor, initially in numpy format...
patterns=[]
if test:
cats = np.random.permutation(np.arange(len(imagedata) - NBTESTCLASSES, len(imagedata)))[:params['nbclasses']] # Which categories to use for this *testing* episode?
else:
cats = np.random.permutation(np.arange(len(imagedata) - NBTESTCLASSES))[:params['nbclasses']] # Which categories to use for this *training* episode?
#print("Test is", test, ", cats are", cats)
#cats = np.array(range(params['nbclasses'])) + 10
cats = np.random.permutation(cats)
#print(cats)
# We show one picture of each category, with labels, then one picture of one of these categories as a test, without label
# But each of the categories may undergo rotation by 0, 90, 180 or 270deg, for augmenting the dataset
# NOTE: We randomly assign one rotation to all the possible categories, not just the ones selected for the episode - it makes the coding simpler
rots = np.random.randint(4, size=len(imagedata))
#rots.fill(0)
testcat = random.choice(cats) # select the class on which we'll test in this episode
unpermcats = cats.copy()
# Inserting the character images and labels in the input tensor at the proper places
location = 0
for nc in range(params['nbshots']):
np.random.shuffle(cats) # Presentations occur in random order
for ii, catnum in enumerate(cats):
#print(catnum)
p = random.choice(imagedata[catnum])
for nr in range(rots[catnum]):
p = np.rot90(p)
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch.utils import data
import numpy as np
from functools import reduce
from egg.zoo.objects_game.util import compute_binomial
import itertools
import os
import pathlib
class VectorsLoader:
def __init__(self,
perceptual_dimensions=[4, 4, 4, 4, 4],
n_distractors=1,
batch_size=32,
train_samples=128000,
validation_samples=4096,
test_samples=1024,
shuffle_train_data=False,
dump_data_folder=None,
load_data_path =None,
seed=None):
self.perceptual_dimensions = perceptual_dimensions
self._n_features = len(self.perceptual_dimensions)
self.n_distractors = n_distractors
self.batch_size = batch_size
self.train_samples = train_samples
self.validation_samples = validation_samples
self.test_samples = test_samples
self.shuffle_train_data = shuffle_train_data
self.load_data_path = load_data_path
self.dump_data_folder = pathlib.Path(dump_data_folder) if dump_data_folder is not None else None
seed = seed if seed else np.random.randint(0, 2 ** 31)
self.random_state = np.random.RandomState(seed)
@property
def n_features(self):
return self._n_features
@n_features.setter
def n_features(self, n_features):
self._n_features = n_features
def upd_cl_options(self, opts):
opts.perceptual_dimensions = self.perceptual_dimensions
opts.train_samples = self.train_samples
opts.validation_samples = self.validation_samples
opts.test_samples = self.test_samples
opts.n_distractors = self.n_distractors
def load_data(self, data_file):
data = np.load(data_file)
from collections import OrderedDict
from datetime import date
import numpy as np
from Constants import Constants
from DCN_Experiments import DCN_Experiments
from PS_Manager import PS_Manager
from PS_Treated_Generator import PS_Treated_Generator
from TARNet_Experiments import TARNet_Experiments
from Utils import Utils
from dataloader import DataLoader
class Experiments:
def __init__(self, running_mode):
self.dL = DataLoader()
self.running_mode = running_mode
self.np_covariates_X_train = None
self.np_covariates_X_test = None
self.np_covariates_T_train = None
self.np_covariates_T_test = None
def run_all_experiments(self, train_path, test_path, iterations, ps_model_type):
device = Utils.get_device()
print(device)
results_list = []
run_parameters = self.__get_run_parameters()
print(str(run_parameters["summary_file_name"]))
file1 = open(run_parameters["summary_file_name"], "a")
for iter_id in range(iterations):
print("--" * 20)
print("iter_id: {0}".format(iter_id))
print("Jobs - NN")
print("--" * 20)
input_nodes = run_parameters["input_nodes"]
self.np_covariates_X_train, self.np_covariates_X_test, self.np_covariates_X_val, \
self.np_covariates_T_train, \
self.np_covariates_T_test, self.np_covariates_T_val \
= self.__load_data(train_path, test_path, iter_id)
# get propensity score for classifier training and testing
ps_score_list_train, ps_score_list_val, ps_score_list_test, ps_model = \
self.__get_ps_model(ps_model_type,
iter_id,
run_parameters["input_nodes"],
device)
run_parameters["consolidated_file_path"] = self.get_consolidated_file_name(ps_model_type)
print("--->>Train size: ")
data_loader_dict_train = self.dL.prepare_tensor_for_DCN(self.np_covariates_X_train,
self.np_covariates_T_train,
ps_score_list_train,
run_parameters["is_synthetic"])
print("--->>Validation size: ")
data_loader_dict_val = self.dL.prepare_tensor_for_DCN(self.np_covariates_X_val,
self.np_covariates_T_val,
ps_score_list_val,
run_parameters["is_synthetic"])
print("--->>Test size: ")
data_loader_dict_test = self.dL.prepare_tensor_for_DCN(self.np_covariates_X_test,
self.np_covariates_T_test,
ps_score_list_test,
run_parameters["is_synthetic"])
n_treated_original = data_loader_dict_train["treated_data"][0].shape[0]
n_control_original = data_loader_dict_train["control_data"][0].shape[0]
# Execute PM GAN
ps_t = PS_Treated_Generator(data_loader_dict_train, data_loader_dict_val, ps_model, ps_model_type)
balanced_dataset_dict = ps_t.simulate_treated_semi_supervised(input_nodes, iter_id, device)
tensor_treated_balanced_dcn = balanced_dataset_dict["tensor_treated_balanced_dcn"]
tensor_control_balanced_dcn = balanced_dataset_dict["tensor_control_balanced_dcn"]
n_treated_balanced_dcn = balanced_dataset_dict["n_treated_balanced_dcn"]
n_control_balanced_dcn = balanced_dataset_dict["n_control_balanced_dcn"]
tensor_balanced_tarnet = balanced_dataset_dict["tensor_balanced_tarnet"]
n_total_balanced_tarnet = balanced_dataset_dict["n_total_balanced_tarnet"]
n_treated_balanced_tarnet = balanced_dataset_dict["n_treated_balanced_tarnet"]
print("---" * 20)
print("-----------> !! Supervised Training(DCN Models ) !!<-----------")
# run DCN Models
tensor_treated_train_original = \
Utils.create_tensors_from_tuple(data_loader_dict_train["treated_data"])
tensor_control_train_original = \
Utils.create_tensors_from_tuple(data_loader_dict_train["control_data"])
model_save_paths = {
"Model_DCN_PD_shared": run_parameters["Model_DCN_PD_shared"].format(iter_id),
"Model_DCN_PD_y1": run_parameters["Model_DCN_PD_y1"].format(iter_id),
"Model_DCN_PD_y0": run_parameters["Model_DCN_PD_y0"].format(iter_id),
"Model_DCN_PD_02_shared": run_parameters["Model_DCN_PD_02_shared"].format(iter_id),
"Model_DCN_PD_02_y1": run_parameters["Model_DCN_PD_02_y1"].format(iter_id),
"Model_DCN_PD_02_y0": run_parameters["Model_DCN_PD_02_y0"].format(iter_id),
"Model_DCN_PD_05_shared": run_parameters["Model_DCN_PD_05_shared"].format(iter_id),
"Model_DCN_PD_05_y1": run_parameters["Model_DCN_PD_05_y1"].format(iter_id),
"Model_DCN_PD_05_y0": run_parameters["Model_DCN_PD_05_y0"].format(iter_id),
"Model_DCN_PM_GAN_shared": run_parameters["Model_DCN_PM_GAN_shared"].format(iter_id),
"Model_DCN_PM_GAN_y1": run_parameters["Model_DCN_PM_GAN_y1"].format(iter_id),
"Model_DCN_PM_GAN_y0": run_parameters["Model_DCN_PM_GAN_y0"].format(iter_id),
"Model_DCN_PM_GAN_02_shared": run_parameters["Model_DCN_PM_GAN_02_shared"].format(iter_id),
"Model_DCN_PM_GAN_02_y1": run_parameters["Model_DCN_PM_GAN_02_y1"].format(iter_id),
"Model_DCN_PM_GAN_02_y0": run_parameters["Model_DCN_PM_GAN_02_y0"].format(iter_id),
"Model_DCN_PM_GAN_05_shared": run_parameters["Model_DCN_PM_GAN_05_shared"].format(iter_id),
"Model_DCN_PM_GAN_05_y1": run_parameters["Model_DCN_PM_GAN_05_y1"].format(iter_id),
"Model_DCN_PM_GAN_05_y0": run_parameters["Model_DCN_PM_GAN_05_y0"].format(iter_id),
"Model_DCN_PM_GAN_PD_shared": run_parameters["Model_DCN_PM_GAN_PD_shared"].format(iter_id),
"Model_DCN_PM_GAN_PD_y1": run_parameters["Model_DCN_PM_GAN_PD_y1"].format(iter_id),
"Model_DCN_PM_GAN_PD_y0": run_parameters["Model_DCN_PM_GAN_PD_y0"].format(iter_id)
}
dcn_experiments = DCN_Experiments(input_nodes, device)
dcn_pd_models_eval_dict = dcn_experiments.evaluate_DCN_Model(tensor_treated_train_original,
tensor_control_train_original,
n_treated_original,
n_control_original,
tensor_treated_balanced_dcn,
tensor_control_balanced_dcn,
n_treated_balanced_dcn,
n_control_balanced_dcn,
data_loader_dict_val,
data_loader_dict_test,
model_save_paths)
print("---" * 20)
print("-----------> !! Supervised Evaluation(DCN Models) !! <-----------")
print("---" * 20)
print("--> 1. Model 1: DCN - PD Supervised Training Evaluation: ")
dcn_pd_eval = dcn_pd_models_eval_dict["dcn_pd_eval_dict"]
dcn_pd_ate_pred, dcn_pd_att_pred, dcn_pd_bias_att, dcn_pd_atc_pred, dcn_pd_policy_value, \
dcn_pd_policy_risk, dcn_pd_err_fact = \
self.__process_evaluated_metric(
dcn_pd_eval["yf_list"],
dcn_pd_eval["e_list"],
dcn_pd_eval["T_list"],
dcn_pd_eval["y1_hat_list"],
dcn_pd_eval["y0_hat_list"],
dcn_pd_eval["ITE_dict_list"],
dcn_pd_eval["predicted_ITE"],
run_parameters["DCN_PD"],
iter_id)
print("---" * 20)
print("--> 2. Model 2: DCN - PD(Dropout 0.5) Supervised Training Evaluation: ")
dcn_pd_05_eval_dict = dcn_pd_models_eval_dict["dcn_pd_05_eval_dict"]
dcn_pd_05_ate_pred, dcn_pd_05_att_pred, dcn_pd_05_bias_att, dcn_pd_05_atc_pred, \
dcn_pd_05_policy_value, \
dcn_pd_05_policy_risk, dcn_pd_05_err_fact = \
self.__process_evaluated_metric(
dcn_pd_05_eval_dict["yf_list"],
dcn_pd_05_eval_dict["e_list"],
dcn_pd_05_eval_dict["T_list"],
dcn_pd_05_eval_dict["y1_hat_list"],
dcn_pd_05_eval_dict["y0_hat_list"],
dcn_pd_05_eval_dict["ITE_dict_list"],
dcn_pd_05_eval_dict["predicted_ITE"],
run_parameters["DCN_PD_05"],
iter_id)
print("---" * 20)
print("--> 3. Model 3: PM GAN - No dropout Supervised Training Evaluation: ")
dcn_pm_gan_eval = dcn_pd_models_eval_dict["dcn_pm_gan_eval_dict"]
dcn_pm_gan_ate_pred, dcn_pm_gan_att_pred, dcn_pm_gan_bias_att, dcn_pm_gan_atc_pred, \
dcn_pm_gan_policy_value, dcn_pm_gan_policy_risk, dcn_pm_gan_err_fact = \
self.__process_evaluated_metric(
dcn_pm_gan_eval["yf_list"],
dcn_pm_gan_eval["e_list"],
dcn_pm_gan_eval["T_list"],
dcn_pm_gan_eval["y1_hat_list"],
dcn_pm_gan_eval["y0_hat_list"],
dcn_pm_gan_eval["ITE_dict_list"],
dcn_pm_gan_eval["predicted_ITE"],
run_parameters["DCN_PM_GAN"],
iter_id)
print("---" * 20)
print("--> 4. Model 4: PM GAN - dropout 0.5 Supervised Training Evaluation: ")
dcn_pm_gan_eval_05 = dcn_pd_models_eval_dict["dcn_pm_gan_eval_drp_05_dict"]
dcn_pm_gan_05_ate_pred, dcn_pm_gan_05_att_pred, dcn_pm_gan_05_bias_att, dcn_pm_gan_05_atc_pred, \
dcn_pm_gan_05_policy_value, dcn_pm_gan_05_policy_risk, dcn_pm_gan_05_err_fact = \
self.__process_evaluated_metric(
dcn_pm_gan_eval_05["yf_list"],
dcn_pm_gan_eval_05["e_list"],
dcn_pm_gan_eval_05["T_list"],
dcn_pm_gan_eval_05["y1_hat_list"],
dcn_pm_gan_eval_05["y0_hat_list"],
dcn_pm_gan_eval_05["ITE_dict_list"],
dcn_pm_gan_eval_05["predicted_ITE"],
run_parameters["DCN_PM_GAN_05"],
iter_id)
print("---" * 20)
print("--> 5. Model 5: PM GAN - PD Supervised Training Evaluation: ")
dcn_pm_gan_eval_pd = dcn_pd_models_eval_dict["dcn_pm_gan_eval_pd_dict"]
dcn_pm_gan_pd_ate_pred, dcn_pm_gan_pd_att_pred, dcn_pm_gan_pd_bias_att, dcn_pm_gan_pd_atc_pred, \
dcn_pm_gan_pd_policy_value, dcn_pm_gan_pd_policy_risk, dcn_pm_gan_pd_err_fact = \
self.__process_evaluated_metric(
dcn_pm_gan_eval_pd["yf_list"],
dcn_pm_gan_eval_pd["e_list"],
dcn_pm_gan_eval_pd["T_list"],
dcn_pm_gan_eval_pd["y1_hat_list"],
dcn_pm_gan_eval_pd["y0_hat_list"],
dcn_pm_gan_eval_pd["ITE_dict_list"],
dcn_pm_gan_eval_pd["predicted_ITE"],
run_parameters["DCN_PM_GAN_PD"],
iter_id)
print("---" * 20)
print("---" * 20)
# run TARNet Models
print("-----------> !! Supervised Training(TARNet Models) !!<-----------")
tarnet_experiments = TARNet_Experiments(input_nodes, device)
tarnet_experiments_models_eval_dict = tarnet_experiments.evaluate_TARNet_Model(
data_loader_dict_train["treated_data"],
data_loader_dict_train["control_data"],
tensor_balanced_tarnet,
data_loader_dict_val,
data_loader_dict_test,
n_total_balanced_tarnet,
n_treated_balanced_tarnet)
print("---" * 20)
print("---> !! Supervised Evaluation(TARNet Models) !! <---")
print("---" * 20)
print("--> 1. Model 1: TARNet Supervised Training Evaluation: ")
tarnet_eval = tarnet_experiments_models_eval_dict["tarnet_eval_dict"]
tarnet_ate_pred, tarnet_att_pred, tarnet_bias_att, tarnet_atc_pred, \
tarnet_policy_value, tarnet_policy_risk, tarnet_err_fact = \
self.__process_evaluated_metric(
tarnet_eval["yf_list"],
tarnet_eval["e_list"],
tarnet_eval["T_list"],
tarnet_eval["y1_hat_list"],
tarnet_eval["y0_hat_list"],
tarnet_eval["ITE_dict_list"],
tarnet_eval["predicted_ITE"],
run_parameters["TARNET"],
iter_id)
print("--> 2. Model 2: TARNet PM GAN Supervised Training Evaluation: ")
tarnet_pm_gan_eval = tarnet_experiments_models_eval_dict["tarnet_pm_gan_eval_dict"]
tarnet_pm_gan_ate_pred, tarnet_pm_gan_att_pred, tarnet_pm_gan_bias_att, tarnet_pm_gan_atc_pred, \
tarnet_pm_gan_policy_value, tarnet_pm_gan_policy_risk, tarnet_pm_gan_err_fact = \
self.__process_evaluated_metric(
tarnet_pm_gan_eval["yf_list"],
tarnet_pm_gan_eval["e_list"],
tarnet_pm_gan_eval["T_list"],
tarnet_pm_gan_eval["y1_hat_list"],
tarnet_pm_gan_eval["y0_hat_list"],
tarnet_pm_gan_eval["ITE_dict_list"],
tarnet_pm_gan_eval["predicted_ITE"],
run_parameters["TARNET"],
iter_id)
print("---" * 20)
result_dict = OrderedDict()
result_dict["iter_id"] = iter_id
result_dict["dcn_pd_ate_pred"] = dcn_pd_ate_pred
result_dict["dcn_pd_att_pred"] = dcn_pd_att_pred
result_dict["dcn_pd_bias_att"] = dcn_pd_bias_att
result_dict["dcn_pd_atc_pred"] = dcn_pd_atc_pred
result_dict["dcn_pd_policy_value"] = dcn_pd_policy_value
result_dict["dcn_pd_policy_risk"] = dcn_pd_policy_risk
result_dict["dcn_pd_err_fact"] = dcn_pd_err_fact
result_dict["dcn_pd_05_ate_pred"] = dcn_pd_05_ate_pred
result_dict["dcn_pd_05_att_pred"] = dcn_pd_05_att_pred
result_dict["dcn_pd_05_bias_att"] = dcn_pd_05_bias_att
result_dict["dcn_pd_05_atc_pred"] = dcn_pd_05_atc_pred
result_dict["dcn_pd_05_policy_value"] = dcn_pd_05_policy_value
result_dict["dcn_pd_05_policy_risk"] = dcn_pd_05_policy_risk
result_dict["dcn_pd_05_err_fact"] = dcn_pd_05_err_fact
result_dict["dcn_pm_gan_ate_pred"] = dcn_pm_gan_ate_pred
result_dict["dcn_pm_gan_att_pred"] = dcn_pm_gan_att_pred
result_dict["dcn_pm_gan_bias_att"] = dcn_pm_gan_bias_att
result_dict["dcn_pm_gan_atc_pred"] = dcn_pm_gan_atc_pred
result_dict["dcn_pm_gan_policy_value"] = dcn_pm_gan_policy_value
result_dict["dcn_pm_gan_policy_risk"] = dcn_pm_gan_policy_risk
result_dict["dcn_pm_gan_err_fact"] = dcn_pm_gan_err_fact
result_dict["dcn_pm_gan_05_att_pred"] = dcn_pm_gan_05_ate_pred
result_dict["dcn_pm_gan_05_att_pred"] = dcn_pm_gan_05_att_pred
result_dict["dcn_pm_gan_05_bias_att"] = dcn_pm_gan_05_bias_att
result_dict["dcn_pm_gan_05_atc_pred"] = dcn_pm_gan_05_atc_pred
result_dict["dcn_pm_gan_05_policy_value"] = dcn_pm_gan_05_policy_value
result_dict["dcn_pm_gan_05_policy_risk"] = dcn_pm_gan_05_policy_risk
result_dict["dcn_pm_gan_05_err_fact"] = dcn_pm_gan_05_err_fact
result_dict["dcn_pm_gan_pd_att_pred"] = dcn_pm_gan_pd_ate_pred
result_dict["dcn_pm_gan_pd_att_pred"] = dcn_pm_gan_pd_att_pred
result_dict["dcn_pm_gan_pd_bias_att"] = dcn_pm_gan_pd_bias_att
result_dict["dcn_pm_gan_pd_atc_pred"] = dcn_pm_gan_pd_atc_pred
result_dict["dcn_pm_gan_pd_policy_value"] = dcn_pm_gan_pd_policy_value
result_dict["dcn_pm_gan_pd_policy_risk"] = dcn_pm_gan_pd_policy_risk
result_dict["dcn_pm_gan_pd_err_fact"] = dcn_pm_gan_pd_err_fact
result_dict["tarnet_ate_pred"] = tarnet_ate_pred
result_dict["tarnet_att_pred"] = tarnet_att_pred
result_dict["tarnet_bias_att"] = tarnet_bias_att
result_dict["tarnet_atc_pred"] = tarnet_atc_pred
result_dict["tarnet_policy_value"] = tarnet_policy_value
result_dict["tarnet_policy_risk"] = tarnet_policy_risk
result_dict["tarnet_err_fact"] = tarnet_err_fact
result_dict["tarnet_pm_gan_ate_pred"] = tarnet_pm_gan_ate_pred
result_dict["tarnet_pm_gan_att_pred"] = tarnet_pm_gan_att_pred
result_dict["tarnet_pm_gan_bias_att"] = tarnet_pm_gan_bias_att
result_dict["tarnet_pm_gan_atc_pred"] = tarnet_pm_gan_atc_pred
result_dict["tarnet_pm_gan_policy_value"] = tarnet_pm_gan_policy_value
result_dict["tarnet_pm_gan_policy_risk"] = tarnet_pm_gan_policy_risk
result_dict["tarnet_pm_gan_err_fact"] = tarnet_pm_gan_err_fact
file1.write("\nToday's date: {0}\n".format(date.today()))
file1.write("Iter: {0}, bias_att_DCN_PD: {1}, bias_att_DCN_PD(0.5): {2}, "
"bias_att_DCN_PM_GAN: {3}, "
"bias_att_DCN_PM_GAN_05: {4}, bias_att_DCN_PM_GAN(PD): {5}, "
"policy_risk_DCN_PD: {6}, "
"policy_risk_DCN_PD(0.5): {7}, policy_risk_DCN_PM_GAN: {8}, "
"policy_risk_PM_GAN_05: {9}, policy_risk_PM_GAN(PD): {10}, "
.format(iter_id, dcn_pd_bias_att,
dcn_pd_05_bias_att,
dcn_pm_gan_bias_att,
dcn_pm_gan_05_bias_att, dcn_pm_gan_pd_bias_att,
dcn_pd_policy_risk, dcn_pd_05_policy_risk,
dcn_pm_gan_policy_risk,
dcn_pm_gan_05_policy_risk,
dcn_pm_gan_pd_policy_risk))
results_list.append(result_dict)
bias_att_set_DCN_PD = []
policy_risk_set_DCN_PD = []
bias_att_set_DCN_PD_05 = []
policy_risk_set_DCN_PD_05 = []
bias_att_DCN_PM_GAN = []
policy_risk_set_DCN_PM_GAN = []
bias_att_DCN_PM_GAN_05 = []
policy_risk_set_DCN_PM_GAN_05 = []
bias_att_DCN_PM_GAN_PD = []
policy_risk_set_DCN_PM_GAN_PD = []
bias_att_tarnet = []
policy_risk_set_tarnet = []
bias_att_tarnet_PM_GAN = []
policy_risk_set_tarnet_PM_GAN = []
for result in results_list:
bias_att_set_DCN_PD.append(result["dcn_pd_bias_att"])
policy_risk_set_DCN_PD.append(result["dcn_pd_policy_risk"])
bias_att_set_DCN_PD_05.append(result["dcn_pd_05_bias_att"])
policy_risk_set_DCN_PD_05.append(result["dcn_pd_05_policy_risk"])
bias_att_DCN_PM_GAN.append(result["dcn_pm_gan_bias_att"])
policy_risk_set_DCN_PM_GAN.append(result["dcn_pm_gan_policy_risk"])
bias_att_DCN_PM_GAN_05.append(result["dcn_pm_gan_05_bias_att"])
policy_risk_set_DCN_PM_GAN_05.append(result["dcn_pm_gan_05_policy_risk"])
bias_att_DCN_PM_GAN_PD.append(result["dcn_pm_gan_pd_bias_att"])
policy_risk_set_DCN_PM_GAN_PD.append(result["dcn_pm_gan_pd_policy_risk"])
bias_att_tarnet.append(result["tarnet_bias_att"])
policy_risk_set_tarnet.append(result["tarnet_policy_risk"])
bias_att_tarnet_PM_GAN.append(result["tarnet_pm_gan_bias_att"])
policy_risk_set_tarnet_PM_GAN.append(result["tarnet_pm_gan_policy_risk"])
        bias_att_DCN_PD_mean = np.mean(np.array(bias_att_set_DCN_PD))
import os.path
from pathlib import Path
from ctypes import *
from sys import platform
from numpy.ctypeslib import ndpointer
import numpy as np
from PIL import Image
try:
import eyeRendererHelperFunctions as eyeTools
except Exception as e:
print("Error importing eyeTools:", e)
print("This is most likely because you do not have the 'python-examples' folder set as a path in $PYTHONPATH.")
exit()
def getIdFromMap(mapImage, x, y):
r = mapImage[y,x,0] << 24 # Red
g = mapImage[y,x,1] << 16 # Green
b = mapImage[y,x,2] << 8 # Blue
a = mapImage[y,x,3] # Alpha
idOut = r | g | b | a
return(idOut)
def getProjectionImageUsingMap(vector, vectorMax, idMap, pjWidth,pjHeight):
np.copy(idMap)
output = np.zeros((pjWidth, pjHeight), dtype=np.uint8)
for x in range(pjWidth):
for y in range(pjHeight):
pixelId = getIdFromMap(idMap, x, y)
output[x,y] = int(vector[pixelId]/vectorMax * 255)
return(output)
try:
# Load the renderer
eyeRenderer = CDLL("../../build/make/lib/libEyeRenderer3.so")
print("Successfully loaded", eyeRenderer)
# Configure the renderer's function outputs and inputs using the helper functions
eyeTools.configureFunctions(eyeRenderer)
#Load a scene
print("Loading scene (please wait)...")
eyeRenderer.loadGlTFscene(c_char_p(b"../../data/natural-standin-sky.gltf"))
print("Scene loaded!")
# Make sure there's a place to save to
Path("output/generated-data/alias-demo-quantified/").mkdir(parents=True, exist_ok=True)
Path("output/vector-data/").mkdir(parents=True, exist_ok=True)
Path("output/view-images/").mkdir(parents=True, exist_ok=True)
Path("output/generated-data/spread-analysis/").mkdir(parents=True, exist_ok=True)
###### First, generate the ommatidial id map
#Resize the renderer display in order to render the spherically-projected variable sample rate
renderWidth = 700
renderHeight = 300
eyeTools.setRenderSize(eyeRenderer, renderWidth, renderHeight)
# Go to the 'insect-eye-spherical-projector' camera
eyeRenderer.gotoCameraByName(c_char_p(b"insect-eye-spherical-projector-ids"))
eyeRenderer.renderFrame() # Just straight-up render the spherical projector ids
idMap = np.copy(np.flipud(eyeRenderer.getFramePointer())) # Copy (remember here the data is still owned by the render, so we need this copy) the id map (plus flip it the right way up)
eyeRenderer.saveFrameAs(c_char_p(("output/generated-data/alias-demo-quantified/projection-ids.ppm").encode())) # Save the image for sanity checking
# Also generate a set of weights that store how much of an influence on an
# average each compound eye should have based on its area coverage in steradians
perSteradianWeights = [1.0/i.getSolidAngle() for i in eyeTools.readEyeFile("../../data/eyes/1000-horizontallyAcute-variableDegree.eye")]
perSteradianWeights = np.asarray(perSteradianWeights)
###### Second, generate ommatidial sample data into a big multi-dim array to perform analysis on
# Change to vector rendering
eyeRenderer.gotoCameraByName(c_char_p(b"insect-eye-fast-vector"))
# Prepare to generate vector data (1000 ommatidia)
vectorWidth = 1000
maxOmmatidialSamples = renderWidth # The upper bound of how many samples will be taken per ommatidium in the analysis
spreadSampleCount = 1000 # How many times each frame is rendered to get a sense of the spread of results from a given ommatidium at different sampling rates
eyeTools.setRenderSize(eyeRenderer, vectorWidth, 1)
# Create a numpy array to store the eye data
# This is a set of eye matricies, each one being a 1st-order stack of samples (the width of the number of ommatidia, and 3 channels deep)
eyeSampleMatrix = np.zeros((maxOmmatidialSamples,spreadSampleCount, vectorWidth, 3), dtype=np.uint8)
# Iterate over eye sample counts
for idx, samples in enumerate(range(1, maxOmmatidialSamples+1)):
eyeRenderer.setCurrentEyeSamplesPerOmmatidium(samples)
eyeRenderer.renderFrame() # First call to ensure randoms are configured
# For each sample count, generate N images to compare
for i in range(spreadSampleCount):
renderTime = eyeRenderer.renderFrame() # Second call to actually render the image
# Retrieve the data
frameData = eyeRenderer.getFramePointer()
frameDataRGB = frameData[:,:,:3] # Remove the alpha channel
eyeSampleMatrix[idx,i,:,:] = np.copy(frameDataRGB[:, :, :])
eyeRenderer.displayFrame()
###### Now calculate the per-ommatidial sample variance and standard deviation at each sample rate
print("")
maxSd = 0
maxVariance = 0
variances = np.zeros((renderWidth, vectorWidth, 1))
standardDeviations = np.zeros((renderWidth, vectorWidth, 1))
avgVariancePerSteradianPerImage = np.zeros(renderWidth)
avgSdPerSteradianPerImage = np.zeros(renderWidth)
for sampleCount, ommatidialSamples in enumerate(eyeSampleMatrix):
# Get the per-ommatidial spread here
meanImage = np.mean(ommatidialSamples, axis=0) # The means of each ommatidium (RGB)
summedSquaredDifferences = np.zeros((meanImage.shape[0],1))
for ommatidiumId, image in enumerate(ommatidialSamples):
print("\rCalculating per-ommatidial spread at each sample rate (Image: {}, Sample: {})...".format(sampleCount+1, ommatidiumId+1), end='')
for indexInMean, pixelInImage in enumerate(image):
difference = np.linalg.norm(pixelInImage - meanImage[indexInMean,:])
difference = difference*difference
summedSquaredDifferences[indexInMean] += difference
varianceImage = summedSquaredDifferences / (len(ommatidialSamples)-1)
sdImage = np.sqrt(varianceImage)
variances[sampleCount] = varianceImage
standardDeviations[sampleCount] = sdImage
# Keep track of the maximum variance and standard deviation
maxVariance = max(maxVariance, np.max(varianceImage))
        maxSd = max(maxSd, np.max(sdImage))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name:LinearRegression
   Description : Uses the iris data from sklearn; x: petal width, y: petal length, which roughly follow a linear relationship
Email : <EMAIL>
Date:2017/12/15
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.datasets import load_iris
from torch.autograd import Variable
iris = load_iris()
# Put y into matrix (column-vector) form
X = np.array([[d[3]] for d in iris.data])
y = np.array([[d[0]] for d in iris.data])
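
# Hedged sketch (not part of the original file): one minimal way the linear model
# suggested by the imports above could be defined and fitted with PyTorch. The layer
# shape, optimizer, learning rate and epoch count are illustrative assumptions only,
# not the author's settings.
class _ExampleLinearRegression(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(1, 1)  # one input feature -> one output value

    def forward(self, x):
        return self.linear(x)


def _example_fit(X, y, epochs=100, lr=0.01):
    model = _ExampleLinearRegression()
    optimizer = torch.optim.SGD(model.parameters(), lr=lr)
    criterion = nn.MSELoss()
    inputs = torch.from_numpy(X).float()
    targets = torch.from_numpy(y).float()
    for _ in range(epochs):
        optimizer.zero_grad()
        loss = criterion(model(inputs), targets)
        loss.backward()
        optimizer.step()
    return model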
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for recurrent layers functionality other than GRU, LSTM, SimpleRNN.
See also: lstm_test.py, gru_test.py, simplernn_test.py.
"""
import collections
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import keras
from keras.engine import base_layer_utils
from keras.layers.rnn import gru
from keras.layers.rnn import gru_v1
from keras.layers.rnn import lstm
from keras.layers.rnn import lstm_v1
from keras.testing_infra import test_combinations
from keras.testing_infra import test_utils
from keras.utils import generic_utils
# isort: off
from tensorflow.python.training.tracking import (
util as trackable_util,
)
# Used for nested input/output/state RNN test.
NestedInput = collections.namedtuple("NestedInput", ["t1", "t2"])
NestedState = collections.namedtuple("NestedState", ["s1", "s2"])
@test_combinations.run_all_keras_modes
class RNNTest(test_combinations.TestCase):
def test_minimal_rnn_cell_non_layer(self):
class MinimalRNNCell:
def __init__(self, units, input_dim):
self.units = units
self.state_size = units
self.kernel = keras.backend.variable(
np.random.random((input_dim, units))
)
def call(self, inputs, states):
prev_output = states[0]
output = keras.backend.dot(inputs, self.kernel) + prev_output
return output, [output]
# Basic test case.
cell = MinimalRNNCell(32, 5)
x = keras.Input((None, 5))
layer = keras.layers.RNN(cell)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacking.
cells = [
MinimalRNNCell(8, 5),
MinimalRNNCell(32, 8),
MinimalRNNCell(32, 32),
]
layer = keras.layers.RNN(cells)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
def test_minimal_rnn_cell_non_layer_multiple_states(self):
class MinimalRNNCell:
def __init__(self, units, input_dim):
self.units = units
self.state_size = (units, units)
self.kernel = keras.backend.variable(
np.random.random((input_dim, units))
)
def call(self, inputs, states):
prev_output_1 = states[0]
prev_output_2 = states[1]
output = keras.backend.dot(inputs, self.kernel)
output += prev_output_1
output -= prev_output_2
return output, [output * 2, output * 3]
# Basic test case.
cell = MinimalRNNCell(32, 5)
x = keras.Input((None, 5))
layer = keras.layers.RNN(cell)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacking.
cells = [
MinimalRNNCell(8, 5),
MinimalRNNCell(16, 8),
MinimalRNNCell(32, 16),
]
layer = keras.layers.RNN(cells)
self.assertEqual(layer.cell.state_size, ((8, 8), (16, 16), (32, 32)))
self.assertEqual(layer.cell.output_size, 32)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
def test_minimal_rnn_cell_layer(self):
class MinimalRNNCell(keras.layers.Layer):
def __init__(self, units, **kwargs):
self.units = units
self.state_size = units
super().__init__(**kwargs)
def build(self, input_shape):
self.kernel = self.add_weight(
shape=(input_shape[-1], self.units),
initializer="uniform",
name="kernel",
)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer="uniform",
name="recurrent_kernel",
)
self.built = True
def call(self, inputs, states):
prev_output = states[0]
h = keras.backend.dot(inputs, self.kernel)
output = h + keras.backend.dot(
prev_output, self.recurrent_kernel
)
return output, [output]
def get_config(self):
config = {"units": self.units}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
# Test basic case.
x = keras.Input((None, 5))
cell = MinimalRNNCell(32)
layer = keras.layers.RNN(cell)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test basic case serialization.
x_np = np.random.random((6, 5, 5))
y_np = model.predict(x_np)
weights = model.get_weights()
config = layer.get_config()
with generic_utils.CustomObjectScope(
{"MinimalRNNCell": MinimalRNNCell}
):
layer = keras.layers.RNN.from_config(config)
y = layer(x)
model = keras.models.Model(x, y)
model.set_weights(weights)
y_np_2 = model.predict(x_np)
self.assertAllClose(y_np, y_np_2, atol=1e-4)
# Test stacking.
cells = [MinimalRNNCell(8), MinimalRNNCell(12), MinimalRNNCell(32)]
layer = keras.layers.RNN(cells)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacked RNN serialization.
x_np = np.random.random((6, 5, 5))
y_np = model.predict(x_np)
weights = model.get_weights()
config = layer.get_config()
with generic_utils.CustomObjectScope(
{"MinimalRNNCell": MinimalRNNCell}
):
layer = keras.layers.RNN.from_config(config)
y = layer(x)
model = keras.models.Model(x, y)
model.set_weights(weights)
y_np_2 = model.predict(x_np)
self.assertAllClose(y_np, y_np_2, atol=1e-4)
def test_minimal_rnn_cell_abstract_rnn_cell(self):
class MinimalRNNCell(keras.layers.AbstractRNNCell):
def __init__(self, units, **kwargs):
self.units = units
super().__init__(**kwargs)
@property
def state_size(self):
return self.units
def build(self, input_shape):
self.kernel = self.add_weight(
shape=(input_shape[-1], self.units),
initializer="uniform",
name="kernel",
)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer="uniform",
name="recurrent_kernel",
)
self.built = True
def call(self, inputs, states):
prev_output = states[0]
h = keras.backend.dot(inputs, self.kernel)
output = h + keras.backend.dot(
prev_output, self.recurrent_kernel
)
return output, output
@property
def output_size(self):
return self.units
cell = MinimalRNNCell(32)
x = keras.Input((None, 5))
layer = keras.layers.RNN(cell)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacking.
cells = [MinimalRNNCell(8), MinimalRNNCell(16), MinimalRNNCell(32)]
layer = keras.layers.RNN(cells)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
def test_rnn_with_time_major(self):
batch = 10
time_step = 5
embedding_dim = 4
units = 3
# Test basic case.
x = keras.Input((time_step, embedding_dim))
time_major_x = keras.layers.Lambda(
lambda t: tf.transpose(t, [1, 0, 2])
)(x)
layer = keras.layers.SimpleRNN(
units, time_major=True, return_sequences=True
)
self.assertEqual(
layer.compute_output_shape(
(time_step, None, embedding_dim)
).as_list(),
[time_step, None, units],
)
y = layer(time_major_x)
self.assertEqual(layer.output_shape, (time_step, None, units))
y = keras.layers.Lambda(lambda t: tf.transpose(t, [1, 0, 2]))(y)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(
np.zeros((batch, time_step, embedding_dim)),
np.zeros((batch, time_step, units)),
)
# Test stacking.
x = keras.Input((time_step, embedding_dim))
time_major_x = keras.layers.Lambda(
lambda t: tf.transpose(t, [1, 0, 2])
)(x)
cell_units = [10, 8, 6]
cells = [keras.layers.SimpleRNNCell(cell_units[i]) for i in range(3)]
layer = keras.layers.RNN(cells, time_major=True, return_sequences=True)
y = layer(time_major_x)
self.assertEqual(layer.output_shape, (time_step, None, cell_units[-1]))
y = keras.layers.Lambda(lambda t: tf.transpose(t, [1, 0, 2]))(y)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(
np.zeros((batch, time_step, embedding_dim)),
np.zeros((batch, time_step, cell_units[-1])),
)
# Test masking.
x = keras.Input((time_step, embedding_dim))
time_major = keras.layers.Lambda(lambda t: tf.transpose(t, [1, 0, 2]))(
x
)
mask = keras.layers.Masking()(time_major)
rnn = keras.layers.SimpleRNN(
units, time_major=True, return_sequences=True
)(mask)
y = keras.layers.Lambda(lambda t: tf.transpose(t, [1, 0, 2]))(rnn)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(
np.zeros((batch, time_step, embedding_dim)),
np.zeros((batch, time_step, units)),
)
# Test layer output
x = keras.Input((time_step, embedding_dim))
rnn_1 = keras.layers.SimpleRNN(units, return_sequences=True)
y = rnn_1(x)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(
np.zeros((batch, time_step, embedding_dim)),
np.zeros((batch, time_step, units)),
)
x_np = np.random.random((batch, time_step, embedding_dim))
y_np_1 = model.predict(x_np)
time_major = keras.layers.Lambda(lambda t: tf.transpose(t, [1, 0, 2]))(
x
)
rnn_2 = keras.layers.SimpleRNN(
units, time_major=True, return_sequences=True
)
y_2 = rnn_2(time_major)
y_2 = keras.layers.Lambda(lambda t: tf.transpose(t, [1, 0, 2]))(y_2)
model_2 = keras.models.Model(x, y_2)
rnn_2.set_weights(rnn_1.get_weights())
y_np_2 = model_2.predict(x_np)
self.assertAllClose(y_np_1, y_np_2, atol=1e-4)
def test_rnn_cell_with_constants_layer(self):
# Test basic case.
x = keras.Input((None, 5))
c = keras.Input((3,))
cell = RNNCellWithConstants(32, constant_size=3)
layer = keras.layers.RNN(cell)
y = layer(x, constants=c)
model = keras.models.Model([x, c], y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(
[np.zeros((6, 5, 5)), np.zeros((6, 3))], np.zeros((6, 32))
)
# Test basic case serialization.
x_np = np.random.random((6, 5, 5))
c_np = np.random.random((6, 3))
y_np = model.predict([x_np, c_np])
weights = model.get_weights()
config = layer.get_config()
custom_objects = {"RNNCellWithConstants": RNNCellWithConstants}
with generic_utils.CustomObjectScope(custom_objects):
layer = keras.layers.RNN.from_config(config.copy())
y = layer(x, constants=c)
model = keras.models.Model([x, c], y)
model.set_weights(weights)
y_np_2 = model.predict([x_np, c_np])
self.assertAllClose(y_np, y_np_2, atol=1e-4)
# test flat list inputs.
with generic_utils.CustomObjectScope(custom_objects):
layer = keras.layers.RNN.from_config(config.copy())
y = layer([x, c])
model = keras.models.Model([x, c], y)
model.set_weights(weights)
y_np_3 = model.predict([x_np, c_np])
self.assertAllClose(y_np, y_np_3, atol=1e-4)
# Test stacking.
cells = [
gru.GRUCell(8),
RNNCellWithConstants(12, constant_size=3),
RNNCellWithConstants(32, constant_size=3),
]
layer = keras.layers.RNN(cells)
y = layer(x, constants=c)
model = keras.models.Model([x, c], y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(
[np.zeros((6, 5, 5)), np.zeros((6, 3))], np.zeros((6, 32))
)
# Test GRUCell reset_after property.
x = keras.Input((None, 5))
c = keras.Input((3,))
cells = [gru.GRUCell(32, reset_after=True)]
layer = keras.layers.RNN(cells)
y = layer(x, constants=c)
model = keras.models.Model([x, c], y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(
[np.zeros((6, 5, 5)), np.zeros((6, 3))], np.zeros((6, 32))
)
# Test stacked RNN serialization
x_np = np.random.random((6, 5, 5))
c_np = np.random.random((6, 3))
y_np = model.predict([x_np, c_np])
weights = model.get_weights()
config = layer.get_config()
with generic_utils.CustomObjectScope(custom_objects):
layer = keras.layers.RNN.from_config(config.copy())
y = layer(x, constants=c)
model = keras.models.Model([x, c], y)
model.set_weights(weights)
y_np_2 = model.predict([x_np, c_np])
self.assertAllClose(y_np, y_np_2, atol=1e-4)
def test_rnn_cell_with_non_keras_constants(self):
# Test basic case.
x = keras.Input((None, 5))
c = tf.zeros([6, 3], dtype=tf.float32)
cell = RNNCellWithConstants(32, constant_size=3)
layer = keras.layers.RNN(cell)
y = layer(x, constants=c)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacking.
cells = [
gru.GRUCell(8),
RNNCellWithConstants(12, constant_size=3),
RNNCellWithConstants(32, constant_size=3),
]
layer = keras.layers.RNN(cells)
y = layer(x, constants=c)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
def test_rnn_cell_with_constants_layer_passing_initial_state(self):
# Test basic case.
x = keras.Input((None, 5))
c = keras.Input((3,))
s = keras.Input((32,))
cell = RNNCellWithConstants(32, constant_size=3)
layer = keras.layers.RNN(cell)
y = layer(x, initial_state=s, constants=c)
model = keras.models.Model([x, s, c], y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(
[np.zeros((6, 5, 5)), np.zeros((6, 32)), np.zeros((6, 3))],
np.zeros((6, 32)),
)
# Test basic case serialization.
x_np = np.random.random((6, 5, 5))
s_np = np.random.random((6, 32))
c_np = np.random.random((6, 3))
y_np = model.predict([x_np, s_np, c_np])
weights = model.get_weights()
config = layer.get_config()
custom_objects = {"RNNCellWithConstants": RNNCellWithConstants}
with generic_utils.CustomObjectScope(custom_objects):
layer = keras.layers.RNN.from_config(config.copy())
y = layer(x, initial_state=s, constants=c)
model = keras.models.Model([x, s, c], y)
model.set_weights(weights)
y_np_2 = model.predict([x_np, s_np, c_np])
self.assertAllClose(y_np, y_np_2, atol=1e-4)
# verify that state is used
y_np_2_different_s = model.predict([x_np, s_np + 10.0, c_np])
with self.assertRaises(AssertionError):
self.assertAllClose(y_np, y_np_2_different_s, atol=1e-4)
# test flat list inputs
with generic_utils.CustomObjectScope(custom_objects):
layer = keras.layers.RNN.from_config(config.copy())
y = layer([x, s, c])
model = keras.models.Model([x, s, c], y)
model.set_weights(weights)
y_np_3 = model.predict([x_np, s_np, c_np])
self.assertAllClose(y_np, y_np_3, atol=1e-4)
def test_rnn_cell_with_non_keras_constants_and_initial_state(self):
# Test basic case.
x = keras.Input((None, 5))
c = tf.zeros([6, 3], dtype=tf.float32)
s = tf.zeros([6, 32], dtype=tf.float32)
cell = RNNCellWithConstants(32, constant_size=3)
layer = keras.layers.RNN(cell)
y = layer(x, initial_state=s, constants=c)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacking.
cells = [
gru.GRUCell(8),
RNNCellWithConstants(12, constant_size=3),
RNNCellWithConstants(32, constant_size=3),
]
layer = keras.layers.RNN(cells)
s = [
tf.zeros([6, 8], dtype=tf.float32),
tf.zeros([6, 12], dtype=tf.float32),
tf.zeros([6, 32], dtype=tf.float32),
]
y = layer(x, initial_state=s, constants=c)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
def test_stacked_rnn_attributes(self):
if tf.executing_eagerly():
self.skipTest("reduce_sum is not available in eager mode.")
cells = [keras.layers.LSTMCell(1), keras.layers.LSTMCell(1)]
layer = keras.layers.RNN(cells)
layer.build((None, None, 1))
# Test weights
self.assertEqual(len(layer.trainable_weights), 6)
cells[0].trainable = False
self.assertEqual(len(layer.trainable_weights), 3)
self.assertEqual(len(layer.non_trainable_weights), 3)
# Test `get_losses_for` and `losses`
x = keras.Input((None, 1))
loss_1 = tf.reduce_sum(x)
loss_2 = tf.reduce_sum(cells[0].kernel)
cells[0].add_loss(loss_1, inputs=x)
cells[0].add_loss(loss_2)
self.assertEqual(len(layer.losses), 2)
self.assertEqual(layer.get_losses_for(None), [loss_2])
self.assertEqual(layer.get_losses_for(x), [loss_1])
# Test `updates`
cells = [keras.layers.LSTMCell(1), keras.layers.LSTMCell(1)]
layer = keras.layers.RNN(cells)
x = keras.Input((None, 1))
_ = layer(x)
update_1 = tf.compat.v1.assign_add(
cells[0].kernel, x[0, 0, 0] * cells[0].kernel
)
update_2 = tf.compat.v1.assign_add(
cells[0].kernel, tf.ones_like(cells[0].kernel)
)
# TODO(b/128682878): Remove when RNNCells are __call__'d.
with base_layer_utils.call_context().enter(layer, x, True, None):
cells[0].add_update(update_1)
cells[0].add_update(update_2)
self.assertEqual(len(layer.updates), 2)
def test_rnn_dynamic_trainability(self):
layer_class = keras.layers.SimpleRNN
embedding_dim = 4
units = 3
layer = layer_class(units)
layer.build((None, None, embedding_dim))
self.assertEqual(len(layer.weights), 3)
self.assertEqual(len(layer.trainable_weights), 3)
self.assertEqual(len(layer.non_trainable_weights), 0)
layer.trainable = False
self.assertEqual(len(layer.weights), 3)
self.assertEqual(len(layer.trainable_weights), 0)
self.assertEqual(len(layer.non_trainable_weights), 3)
layer.trainable = True
self.assertEqual(len(layer.weights), 3)
self.assertEqual(len(layer.trainable_weights), 3)
self.assertEqual(len(layer.non_trainable_weights), 0)
@parameterized.parameters(
[keras.layers.SimpleRNN, keras.layers.GRU, keras.layers.LSTM]
)
def test_rnn_cell_trainability(self, layer_cls):
# https://github.com/tensorflow/tensorflow/issues/32369.
layer = layer_cls(3, trainable=False)
self.assertFalse(layer.cell.trainable)
layer.trainable = True
self.assertTrue(layer.cell.trainable)
def test_state_reuse_with_dropout(self):
layer_class = keras.layers.SimpleRNN
embedding_dim = 4
units = 3
timesteps = 2
num_samples = 2
input1 = keras.Input(
batch_shape=(num_samples, timesteps, embedding_dim)
)
layer = layer_class(
units, return_state=True, return_sequences=True, dropout=0.2
)
state = layer(input1)[1:]
input2 = keras.Input(
batch_shape=(num_samples, timesteps, embedding_dim)
)
output = layer_class(units)(input2, initial_state=state)
model = keras.Model([input1, input2], output)
inputs = [
np.random.random((num_samples, timesteps, embedding_dim)),
np.random.random((num_samples, timesteps, embedding_dim)),
]
model.predict(inputs)
def test_builtin_and_custom_rnn_cell_serialization(self):
@keras.utils.generic_utils.register_keras_serializable(
package="TestOnly"
)
class CustomRNNCell(keras.layers.Layer):
def __init__(self, units, **kwargs):
self.units = units
self.state_size = units
super().__init__(**kwargs)
def build(self, input_shape):
self.kernel = self.add_weight(
shape=(input_shape[-1], self.units),
initializer="uniform",
name="kernel",
)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer="uniform",
name="recurrent_kernel",
)
self.built = True
def call(self, inputs, states):
prev_output = states[0]
h = keras.backend.dot(inputs, self.kernel)
output = h + keras.backend.dot(
prev_output, self.recurrent_kernel
)
return output, [output]
def get_config(self):
config = {"units": self.units}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
for cell_class in [
keras.layers.SimpleRNNCell,
keras.layers.GRUCell,
keras.layers.LSTMCell,
CustomRNNCell,
]:
# Test basic case.
x = keras.Input((None, 5))
cell = cell_class(32)
layer = keras.layers.RNN(cell)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
# Test basic case serialization.
x_np = np.random.random((6, 5, 5))
y_np = model.predict(x_np)
weights = model.get_weights()
config = layer.get_config()
layer = keras.layers.RNN.from_config(config)
y = layer(x)
model = keras.models.Model(x, y)
model.set_weights(weights)
y_np_2 = model.predict(x_np)
self.assertAllClose(y_np, y_np_2, atol=1e-4)
# Test stacking.
cells = [cell_class(8), cell_class(12), cell_class(32)]
layer = keras.layers.RNN(cells)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
# Test stacked RNN serialization.
x_np = np.random.random((6, 5, 5))
y_np = model.predict(x_np)
weights = model.get_weights()
config = layer.get_config()
layer = keras.layers.RNN.from_config(config)
y = layer(x)
model = keras.models.Model(x, y)
model.set_weights(weights)
y_np_2 = model.predict(x_np)
self.assertAllClose(y_np, y_np_2, atol=1e-4)
@parameterized.named_parameters(
*test_utils.generate_combinations_with_testcase_name(
layer=[
keras.layers.SimpleRNN,
gru_v1.GRU,
lstm_v1.LSTM,
gru.GRU,
lstm.LSTM,
],
unroll=[True, False],
)
)
def test_rnn_dropout(self, layer, unroll):
rnn_layer = layer(3, dropout=0.1, recurrent_dropout=0.1, unroll=unroll)
if not unroll:
x = keras.Input((None, 5))
else:
x = keras.Input((5, 5))
y = rnn_layer(x)
model = keras.models.Model(x, y)
model.compile("sgd", "mse", run_eagerly=test_utils.should_run_eagerly())
x_np = np.random.random((6, 5, 5))
y_np = np.random.random((6, 3))
model.train_on_batch(x_np, y_np)
@parameterized.named_parameters(
*test_utils.generate_combinations_with_testcase_name(
cell=[
keras.layers.SimpleRNNCell,
keras.layers.GRUCell,
keras.layers.LSTMCell,
],
unroll=[True, False],
)
)
def test_stacked_rnn_dropout(self, cell, unroll):
cells = [
cell(3, dropout=0.1, recurrent_dropout=0.1),
cell(3, dropout=0.1, recurrent_dropout=0.1),
]
layer = keras.layers.RNN(cells, unroll=unroll)
if not unroll:
x = keras.Input((None, 5))
else:
x = keras.Input((5, 5))
y = layer(x)
model = keras.models.Model(x, y)
model.compile("sgd", "mse", run_eagerly=test_utils.should_run_eagerly())
x_np = np.random.random((6, 5, 5))
y_np = np.random.random((6, 3))
model.train_on_batch(x_np, y_np)
def test_dropout_mask_reuse(self):
# The layer is created with recurrent_initializer = zero, so that the
# the recurrent state won't affect the output. By doing this, we can
# verify the output and see if the same mask is applied to for each
# timestep.
layer_1 = keras.layers.SimpleRNN(
3,
dropout=0.5,
kernel_initializer="ones",
recurrent_initializer="zeros",
return_sequences=True,
unroll=True,
)
layer_2 = keras.layers.RNN(
keras.layers.SimpleRNNCell(
3,
dropout=0.5,
kernel_initializer="ones",
recurrent_initializer="zeros",
),
return_sequences=True,
unroll=True,
)
layer_3 = keras.layers.RNN(
[
keras.layers.SimpleRNNCell(
3,
dropout=0.5,
kernel_initializer="ones",
recurrent_initializer="zeros",
),
keras.layers.SimpleRNNCell(
3,
dropout=0.5,
kernel_initializer="ones",
recurrent_initializer="zeros",
),
],
return_sequences=True,
unroll=True,
)
def verify(rnn_layer):
inputs = tf.constant(1.0, shape=(6, 2, 5))
out = rnn_layer(inputs, training=True)
if not tf.executing_eagerly():
self.evaluate(tf.compat.v1.global_variables_initializer())
batch_1 = self.evaluate(out)
batch_1_t0, batch_1_t1 = batch_1[:, 0, :], batch_1[:, 1, :]
self.assertAllClose(batch_1_t0, batch_1_t1)
# This simulate the layer called with multiple batches in eager mode
if tf.executing_eagerly():
out2 = rnn_layer(inputs, training=True)
else:
out2 = out
batch_2 = self.evaluate(out2)
batch_2_t0, batch_2_t1 = batch_2[:, 0, :], batch_2[:, 1, :]
self.assertAllClose(batch_2_t0, batch_2_t1)
# Also validate that different dropout is used by between batches.
self.assertNotAllClose(batch_1_t0, batch_2_t0)
self.assertNotAllClose(batch_1_t1, batch_2_t1)
for l in [layer_1, layer_2, layer_3]:
verify(l)
def test_stacked_rnn_compute_output_shape(self):
cells = [keras.layers.LSTMCell(3), keras.layers.LSTMCell(6)]
embedding_dim = 4
timesteps = 2
layer = keras.layers.RNN(
cells, return_state=True, return_sequences=True
)
output_shape = layer.compute_output_shape(
(None, timesteps, embedding_dim)
)
expected_output_shape = [
(None, timesteps, 6),
(None, 3),
(None, 3),
(None, 6),
(None, 6),
]
self.assertEqual(
[tuple(o.as_list()) for o in output_shape], expected_output_shape
)
# Test reverse_state_order = True for stacked cell.
stacked_cell = keras.layers.StackedRNNCells(
cells, reverse_state_order=True
)
layer = keras.layers.RNN(
stacked_cell, return_state=True, return_sequences=True
)
output_shape = layer.compute_output_shape(
(None, timesteps, embedding_dim)
)
expected_output_shape = [
(None, timesteps, 6),
(None, 6),
(None, 6),
(None, 3),
(None, 3),
]
self.assertEqual(
[tuple(o.as_list()) for o in output_shape], expected_output_shape
)
def test_stacked_rnn_with_training_param(self):
# See https://github.com/tensorflow/tensorflow/issues/32586
class CellWrapper(keras.layers.AbstractRNNCell):
def __init__(self, cell):
super().__init__()
self.cell = cell
@property
def state_size(self):
return self.cell.state_size
@property
def output_size(self):
return self.cell.output_size
def build(self, input_shape):
self.cell.build(input_shape)
self.built = True
def get_initial_state(
self, inputs=None, batch_size=None, dtype=None
):
return self.cell.get_initial_state(
inputs=inputs, batch_size=batch_size, dtype=dtype
)
def call(self, inputs, states, training=None, **kwargs):
assert training is not None
return self.cell(inputs, states=states, training=training)
cell = keras.layers.LSTMCell(32)
cell = CellWrapper(cell)
cell = keras.layers.StackedRNNCells([cell])
rnn = keras.layers.RNN(cell)
inputs = np.ones((8, 4, 16), dtype=np.float32)
rnn(inputs, training=True)
def test_stacked_rnn_with_nested_cell(self):
batch = 10
t = 5
i1, i2, i3 = 3, 4, 5
o11, o12, o13 = 2, 3, 4
o21, o22, o23 = 4, 5, 6
# test 1: use_tuple=False
cells = [NestedCell(o11, o12, o13), NestedCell(o21, o22, o23)]
rnn = keras.layers.RNN(cells, return_sequences=True, return_state=True)
input_1 = keras.Input((t, i1))
input_2 = keras.Input((t, i2, i3))
output1, output2, state1, state2 = rnn((input_1, input_2))
s11, s12 = state1
s21, s22 = state2
self.assertEqual(output1.shape.as_list(), [None, t, o21])
self.assertEqual(output2.shape.as_list(), [None, t, o22, o23])
self.assertEqual(s11.shape.as_list(), [None, o11])
self.assertEqual(s12.shape.as_list(), [None, o12, o13])
self.assertEqual(s21.shape.as_list(), [None, o21])
self.assertEqual(s22.shape.as_list(), [None, o22, o23])
model = keras.models.Model([input_1, input_2], [output1, output2])
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(
[np.zeros((batch, t, i1)), np.zeros((batch, t, i2, i3))],
[np.zeros((batch, t, o21)), np.zeros((batch, t, o22, o23))],
)
self.assertEqual(
model.output_shape, [(None, t, o21), (None, t, o22, o23)]
)
# test 2: use_tuple=True
cells = [
NestedCell(o11, o12, o13, use_tuple=True),
NestedCell(o21, o22, o23),
]
rnn = keras.layers.RNN(cells, return_sequences=True, return_state=True)
input_1 = keras.Input((t, i1))
input_2 = keras.Input((t, i2, i3))
output1, output2, state1, state2 = rnn(
NestedInput(t1=input_1, t2=input_2)
)
s11, s12 = state1
s21, s22 = state2
self.assertEqual(output1.shape.as_list(), [None, t, o21])
self.assertEqual(output2.shape.as_list(), [None, t, o22, o23])
self.assertEqual(s11.shape.as_list(), [None, o11])
self.assertEqual(s12.shape.as_list(), [None, o12, o13])
self.assertEqual(s21.shape.as_list(), [None, o21])
self.assertEqual(s22.shape.as_list(), [None, o22, o23])
model = keras.models.Model([input_1, input_2], [output1, output2])
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
model.train_on_batch(
[np.zeros((batch, t, i1)), np.zeros((batch, t, i2, i3))],
            [np.zeros((batch, t, o21)), np.zeros((batch, t, o22, o23))],
        )
import numpy as np
from scipy.stats import iqr
from ..config import MAX_VAL_AFTER_NORMALIZATION
from ..utils.dataUtils import morphDilation, gaussianFilter, radial_profile, applyRelativeThr, getCoordsWithinSphere
from ..utils.genericException import GenericError
DEFAULT_PERCENTIL = 95
DEFAULT_BINARY_MASK_THR= 0.01
def targetNormalizationRegr(x, mask):
mask = morphDilation(mask, 1)
binary_mask = np.where(mask > DEFAULT_BINARY_MASK_THR, 1, 0)
background = np.median(x[binary_mask < 1])
background_median = np.median(background)
background_upper_percentil = np.percentile(background, DEFAULT_PERCENTIL)
x = np.clip(x, background_median, None)
x = x * binary_mask
target_inside_mask = x[binary_mask > 0]
target_inside_mask = target_inside_mask[target_inside_mask > background_median]
target_upper_percentil = np.percentile(target_inside_mask, DEFAULT_PERCENTIL)
target_iqr = target_upper_percentil - background_upper_percentil
if target_iqr <= 0:
raise ValueError("Bad iqr %.3f. Is your input masked?. Unmasked inputs required" % target_iqr)
x = x / target_iqr
x = np.clip(x, None, MAX_VAL_AFTER_NORMALIZATION) #Just to prevent outliers
return x
def targetNormalizationLocscale(x, mask):
binary_mask = np.where(morphDilation(mask, 1) > DEFAULT_BINARY_MASK_THR, 1, 0)
background = np.median(x[binary_mask < 1])
background_median = np.median(background)
background_upper_percentil = np.percentile(background, DEFAULT_PERCENTIL)
x = np.clip(x, background_median, None)
x = x * mask
target_inside_mask = x[binary_mask > 0]
target_inside_mask = target_inside_mask[target_inside_mask > background_median]
target_upper_percentil = np.percentile(target_inside_mask, DEFAULT_PERCENTIL)
target_iqr = target_upper_percentil - background_upper_percentil
if target_iqr <= 0:
raise ValueError("Bad iqr %.3f. Is your input masked?. Unmasked inputs required" % target_iqr)
x = x / target_iqr
x = np.clip(x, None, MAX_VAL_AFTER_NORMALIZATION) #Just to prevent outliers
return x
def targetNormalization_2(x, y, mask):
inside_x = x[morphDilation(mask, 1)>= DEFAULT_BINARY_MASK_THR]
mean_x, std_x = np.mean(inside_x), np.std(inside_x)
inside_y= y[mask>= DEFAULT_BINARY_MASK_THR]
mean_y, std_y = np.mean(inside_y), np.std(inside_y)
y= ((y-mean_y)/std_y)*std_x + mean_x
return y
def targetNormalizationClassif(x):
x= np.clip(x, np.percentile(x,0.1), np.percentile(x,99.9))
x_norm= minMaxNormalization(x)
return x_norm
def inputNormalizationWithMask(x, mask):
mask = morphDilation(mask, 3)
mask= applyRelativeThr(mask, DEFAULT_BINARY_MASK_THR)
median_val = np.median( x[mask>0] )
iqr_val = iqr(x[mask > 0], rng=(10,DEFAULT_PERCENTIL))
x_norm= (x-median_val)/iqr_val
x_norm*= mask
return x_norm
def inputNormalizationWithMask_2(x, mask):
mask = morphDilation(mask, 3)
mask= applyRelativeThr(mask, DEFAULT_BINARY_MASK_THR)
selection= (mask>0) & (x>0)
median_val = np.median( x[selection ] )
iqr_val = iqr(x[selection], rng=(10,DEFAULT_PERCENTIL))
# iqr_val= x[selection].max()- x[selection].min()
x_norm= (x-median_val)/iqr_val
x_norm*= mask
return x_norm
def inputNormalizationWithMask_3(x, mask): #This might is too tight for general purposes
mask = np.where(morphDilation(mask, 1) > DEFAULT_BINARY_MASK_THR, 1, 0)
selection= (mask>0) & (x>0)
median_val = np.median( x[selection ] )
iqr_val = iqr(x[selection], rng=(10,DEFAULT_PERCENTIL))
# iqr_val= x[selection].max()- x[selection].min()
x_norm= (x-median_val)/iqr_val
x_norm*= mask
return x_norm
def inputNormalization_classification(x):
x_min= -x.min()
x_range= x.max()-x_min
midPoint= x_min+ x_range*.5
conditionSplit= x<midPoint
x_min= np.percentile(x[conditionSplit], 5)
x_max = np.percentile(x[~ conditionSplit], DEFAULT_PERCENTIL)
if not np.isclose(x_min, x_max):
x= x/(x_max-x_min)
x = np.clip(x, None, MAX_VAL_AFTER_NORMALIZATION) # Just to prevent outliers
return x
def inputNormalization(x):
x= robustNormalization(x )
x = np.clip(x, None, MAX_VAL_AFTER_NORMALIZATION) # Just to prevent outliers
return x
def inputNormalization_2(x):
from skimage.filters import threshold_otsu
otsu_thr= threshold_otsu(x)
out_mean= np.mean(x[x<otsu_thr])
inner_range= iqr(x[x>=otsu_thr], rng= (10, DEFAULT_PERCENTIL) )
if inner_range==0:
raise NormalizationError("warning, bad iqr %.3f. Is your input masked?. Unmasked inputs required" % inner_range)
x=(x- out_mean)/inner_range
x = np.clip(x, None, MAX_VAL_AFTER_NORMALIZATION) # Just to prevent outliers
return x
def inputNormalization_3(x, noise_stats=None):
'''
    Performs input normalization using the typical cryo-EM scheme of normalizing according to the noise:
let noise mean be 0 and noise std=0.1
:param x: input volume
    :param noise_stats=(mean_noise, std_noise): The statistics of the noise for the input volume. If None, it will try to automatically
guess them
:return: normalized input
'''
if noise_stats is None:
meanInNoise, stdInNosise= _guessNoiseStats_radialProfile(x)
print("Noise stats: mean=%f std=%f"%(meanInNoise, stdInNosise))
else:
meanInNoise, stdInNosise= noise_stats
x_norm= (x-meanInNoise)/ (stdInNosise*10) #Desired noise distribution mean=0 and std=0.1
assert not np.any(np.isnan(x_norm)), "Error normalizing input. Some nans were generated in the volume. Try an alternative normalization option"
return x_norm
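
# Illustrative usage sketch (not part of the original module): calling
# inputNormalization_3 when the noise statistics of the map are already known. The
# synthetic volume and the (2.0, 4.0) noise stats below are made-up values for
# demonstration; after normalization the noise should have mean ~0 and std ~0.1,
# as described in the docstring above.
def _example_inputNormalization_3():
    rng = np.random.RandomState(0)
    vol = rng.normal(loc=2.0, scale=4.0, size=(32, 32, 32))
    return inputNormalization_3(vol, noise_stats=(2.0, 4.0))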
def _guessNoiseStats_radialProfile(x):
from .dataUtils import resizeVol
from scipy import ndimage
from scipy.signal import argrelextrema
#First part is a set of heuristics to identify the circular noise around the protein
    x_gauss= gaussianFilter(resizeVol(x, (100, 100, 100)), 0.1 ) #Resize to speed up and filter to reduce noise level.
win_size=5
win_mean = ndimage.uniform_filter(x_gauss, win_size)
win_sqr_mean = ndimage.uniform_filter(x_gauss ** 2, win_size) #This is a good estimation of the protein region
win_var = win_sqr_mean - win_mean ** 2
interestingCurve= radial_profile(win_var)-radial_profile(win_mean)
energyCurve= radial_profile(win_sqr_mean)
# import matplotlib.pyplot as plt
# f= plt.figure()
# plt.plot(radial_profile(win_mean),label='win_mean')
# plt.plot(radial_profile(win_sqr_mean),label='win_sqr_mean')
# plt.plot(radial_profile(win_var),label='win_var')
# plt.plot(radial_profile(win_var)-radial_profile(win_mean),label='win_var_minus_win_mean')
# plt.legend()
# f.show()
# #plt.show()
#
# from devel_code.trainNet.dataManager import plot_vol_and_target
# plot_vol_and_target(x, x_gauss, win_sqr_mean)
candidateMinima= argrelextrema(interestingCurve, np.less)[0]
if len(candidateMinima)>0:
toLookIndex= np.min(candidateMinima)
if interestingCurve[toLookIndex]>=0:
toLookIndex = np.min(np.argmin(interestingCurve)) # Noise border will be at index > toLookIndex
else:
toLookIndex = np.min(np.argmin(interestingCurve)) # Noise border will be at index > toLookIndex
if toLookIndex>50: #Radial noise, the most typical, has 50 over 100 voxels radius
candidateNoiseDist = x_gauss.shape[0] // 2
print("Automatic radial noise detection may have failed. No suitable min index found. Guessing radial noise of radius %s%%"%(candidateNoiseDist))
else:
maxInterestingIdx= np.min(np.argmax(interestingCurve[toLookIndex:51])).astype(np.int32)+toLookIndex
if ( energyCurve[maxInterestingIdx]> interestingCurve[maxInterestingIdx] and interestingCurve[maxInterestingIdx]>0) : #np.isclose(maxInterestingIdx, maxWinMean, rtol=1e-2):
raise NormalizationError("Warning, the input might be hollow structure. Automatic masking might fail. Aborting...")
try:
toLookIndex2= np.min(np.where(interestingCurve[toLookIndex:]>0))+toLookIndex
try:
toLookIndex3 = np.min(np.where(interestingCurve[toLookIndex2:] <= 0)) + toLookIndex2
candidateNoiseDist = round((toLookIndex2 + toLookIndex3) * 0.5)
grad_1 = np.mean(np.diff(interestingCurve[-10:]))
grad_2 = np.mean(np.diff(interestingCurve[-25:-10]))
if grad_1 > 1e-8 and grad_2 >1e-8 and grad_1 > 3 * grad_2:
                    candidateNoiseDist = np.sqrt(3 * (x_gauss.shape[0] // 2) ** 2)
'''
@Authors: <NAME>, <NAME>, <NAME>, <NAME>
@Purpose: Explore 6DOF rocket trajectory, especially quaternion rotation
Learning resources: https://eater.net/quaternions
'''
import numpy as np
import oyaml as yaml
import math
class Rotator:
def __init__(self):
self.re = 0; self.i = 0; self.j = 0; self.k = 1
self.body_vector = np.array([[0],[1],[0],[0]])
'''
https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Using_quaternions_as_rotations
This function should take inputs: 'Cartesian, unit rotation-axis (Vector),
Rotation Angle in radians (Theta) and form a quaternion vector
'''
def form_quaternion(self, vector, theta):
assert self.vector_is_unit(vector), 'Class: Rotator, Fxn: form_quaternion, vector is not a unit quaternion'
r = np.cos(theta/2)
i = -1*np.sin(theta/2)*vector[0]
j = -1*np.sin(theta/2)*vector[1]
k = -1*np.sin(theta/2)*vector[2]
quaternion = np.array([[r],[i],[j],[k]])
return quaternion
'''
https://math.stackexchange.com/questions/40164/how-do-you-rotate-a-vector-by-a-unit-quaternion
'''
def rotate_body(self, quaternion):
left = quaternion
        right = np.array([quaternion[0], -quaternion[1], -quaternion[2], -quaternion[3]])  # conjugate of the rotation quaternion
h1 = self.hamilton_product(left, self.body_vector)
print('H1: {}'.format(h1))
self.body_vector = self.hamilton_product(h1,right)
'''
https://en.wikipedia.org/wiki/Quaternion#Hamilton_product
https://math.stackexchange.com/questions/40164/how-do-you-rotate-a-vector-by-a-unit-quaternion
'''
def hamilton_product(self, vec1, vec2):
a1 = vec1[0]; a2 = vec2[0]
b1 = vec1[1]; b2 = vec2[1]
c1 = vec1[2]; c2 = vec2[2]
d1 = vec1[3]; d2 = vec2[3]
r = float(a1*a2 - b1*b2 - c1*c2 - d1*d2)
x = float(a1*b2 + b1*a2 + c1*d2 - d1*c2)
y = float(a1*c2 - b1*d2 + c1*a2 + d1*b2)
        z = float(a1*d2 + b1*c2 - c1*b2 + d1*a2)
return np.array([[r],[x],[y],[z]])
def report_body_vector(self):
print(self.body_vector)
'''
Convert some arbitrary vector to a unit vector (divide components by the magnitude)
'''
def unitify_vector(self, vector):
mag = np.linalg.norm(vector)
return vector/mag
'''
Checker function to verify a vector of arbitrary length is a unit vector
Tolerance variable to allow 'close enough' cases to succeed
'''
    def vector_is_unit(self, vec):
        squares = [x*x for x in vec]
        vec_sum = np.sum(squares)
        norm = np.sqrt(vec_sum)
        tolerance = .01
        if abs(norm-1) < tolerance:
            return True
        return False
'''
https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Using_quaternions_as_rotations
q' = q2q1
q1 first, then q2
USE QUATERNION MULTIPLICATION RULE:
v*w where v and w are both quaternions with no real part
v*w = v x w - v * w
v*w where v and w are both quaternions with real part s and t (see wikipedia)
v*w = (s + v)*(t + w) = (st - v * w)+(sw + tv + v x w)
'''
def combine_quaternions(self, q1, q2):
        re1 = float(q1[0]); re2 = float(q2[0])
        # take the (i, j, k) parts as flat 3-vectors so np.cross/np.dot behave as expected
        q1 = np.asarray(q1).flatten()[1:4]; q2 = np.asarray(q2).flatten()[1:4]
cross = np.cross(q2, q1); dot = np.dot(q2, q1)
re_prime = (re1*re2 - dot)
temp = re1*q2 + re2*q1 + cross
q_prime = np.array([[re_prime],[temp[0]],[temp[1]],[temp[2]]])
return q_prime
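
# Illustrative usage sketch (not part of the original file): forming a quaternion for
# a 90 degree rotation about the z-axis, rotating the body vector with it, and
# composing two rotations, using the Rotator methods documented above. The axis and
# angle are arbitrary demonstration values.
def _example_rotator_usage():
    rot = Rotator()
    q = rot.form_quaternion(np.array([0.0, 0.0, 1.0]), np.pi / 2)
    rot.rotate_body(q)
    rot.report_body_vector()
    q_twice = rot.combine_quaternions(q, q)  # two successive 90 degree rotations
    return q_twice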
class Rocket(Rotator):
def __init__(self, rocket_data, environment_obj, reference=None, units='english'):
super().__init__()
'''
abs_orientation is a (unit) vector representing the orientation of the rocket
relative to the launch location axis or global axis
'''
        self.abs_orientation = np.array([[0],[0],[1]])
import numpy as np
import numpy.ma as ma
from scipy.sparse import csc_matrix as sparse_matrix
from scipy.sparse.linalg import eigs
from scipy.linalg import eig
from scipy.sparse import diags,identity,coo_matrix
import msmtools.estimation as msm_estimation
import msmtools.analysis as msm_analysis
import stats
import matplotlib.pyplot as plt
def segment_maskedArray(tseries,min_size=50):
'''
Segments time series in case it has missing data
'''
if len(tseries.shape)>1:
mask = ~np.any(tseries.mask,axis=1)
else:
mask = ~tseries.mask
segments = np.where(np.abs(np.diff(np.concatenate([[False], mask, [False]]))))[0].reshape(-1, 2)
return segments
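
# Illustrative usage sketch (not part of the original module): segment_maskedArray on
# a short 1-D masked series with a gap returns the [start, stop) bounds of each run
# of valid samples; the toy values below are arbitrary.
def _example_segment_maskedArray():
    x = ma.masked_invalid(np.array([1.0, 2.0, np.nan, np.nan, 5.0, 6.0]))
    return segment_maskedArray(x)  # expected: [[0, 2], [4, 6]]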
def get_count_matrix(labels,lag,nstates):
observable_seqs = ma.compress_rows(ma.vstack([labels[:-lag],labels[lag:]]).T)
row = observable_seqs[:,0]
col = observable_seqs[:,1]
data = np.ones(row.size)
C = coo_matrix((data, (row, col)), shape=(nstates, nstates))
count_matrix = C.tocsr()
return count_matrix
def get_count_ms(dtrajs,delay,nstates):
if len(dtrajs.shape)>1:
count_ms = coo_matrix((nstates,nstates))
for dtraj in dtrajs:
try:
count_ms+=get_count_matrix(dtraj,delay,nstates)
except:
print('Warning! No samples.')
continue
else:
try:
count_ms=get_count_matrix(dtrajs,delay,nstates)
except:
print('Warning! No samples.')
return count_ms
def tscales_samples(labels,delay,dt,size,n_modes=5,reversible=True):
dtrajs = get_split_trajs(labels,size)
nstates = np.max(labels)+1
P_traj=[]
ts_traj = []
for sample_traj in dtrajs:
count_ms = get_count_ms(sample_traj,delay,nstates)
connected_count_matrix = msm_estimation.connected_cmatrix(count_ms)
P = msm_estimation.tmatrix(connected_count_matrix)
if reversible:
R = get_reversible_transition_matrix(P)
tscale = compute_tscales(R,delay,dt,k=n_modes+1)
else:
tscale = compute_tscales(P,delay,dt,k=n_modes+1)
ts_traj.append(tscale)
P_traj.append(P)
return ts_traj,P_traj
def transition_matrix(labels,lag,return_connected=False):
nstates = np.max(labels)+1
count_matrix = get_count_matrix(labels,lag,nstates)
connected_count_matrix = msm_estimation.connected_cmatrix(count_matrix)
P = msm_estimation.tmatrix(connected_count_matrix)
if return_connected:
lcs = msm_estimation.largest_connected_set(count_matrix)
return lcs,P
else:
return P
def get_connected_labels(labels,lcs):
final_labels = ma.zeros(labels.shape,dtype=int)
for key in np.argsort(lcs):
final_labels[labels==lcs[key]]=key+1
final_labels[final_labels==0] = ma.masked
final_labels-=1
return final_labels
def sorted_spectrum(R,k=5,which='LR'):
eigvals,eigvecs = eigs(R,k=k,which=which)
sorted_indices = np.argsort(eigvals.real)[::-1]
return eigvals[sorted_indices],eigvecs[:,sorted_indices]
def compute_tscales(P,delay,dt=1,k=2):
try:
if P.shape[1]<=10:
eigvals = np.sort(eig(P.toarray())[0])[::-1][:k]
else:
eigvals = eigs(P,k=k,which='LR',return_eigenvectors=False)
sorted_indices = np.argsort(eigvals.real)[::-1]
eigvals = eigvals[sorted_indices][1:].real
eigvals[np.abs(eigvals-1)<1e-12] = np.nan
eigvals[eigvals<1e-12] = np.nan
return -(delay*dt)/np.log(np.abs(eigvals))
except:
return np.array([np.nan]*(k-1))
def get_reversible_transition_matrix(P):
probs = stationary_distribution(P)
P_hat = diags(1/probs)*P.transpose()*diags(probs)
R=(P+P_hat)/2
return R
def get_split_trajs(labels,size = 0):
if size == 0:
        size = len(labels)//20
return ma.array([labels[kt:kt+size] for kt in range(0,len(labels)-size,size)])
def implied_tscale(labels,size,delay,dt,n_modes,reversible=True):
dtrajs = get_split_trajs(labels,size)
nstates = np.max(labels)+1
count_ms = get_count_ms(dtrajs,delay,nstates)
connected_count_matrix = msm_estimation.connected_cmatrix(count_ms)
P = msm_estimation.tmatrix(connected_count_matrix)
if reversible:
R = get_reversible_transition_matrix(P)
tscale = compute_tscales(R,delay,dt,k=n_modes+1)
else:
tscale = compute_tscales(P,delay,dt,k=n_modes+1)
return tscale
def get_bootstrapped_Ps(labels,delay,n_samples,size = 0):
#get dtrajs to deal with possible nans
dtrajs = get_split_trajs(labels,size)
nstates = np.unique(labels.compressed()).shape[0]
sample_Ps=[]
for k in range(n_samples):
sample_trajs = [dtrajs[k] for k in np.random.randint(0,len(dtrajs),len(dtrajs))]
count_ms = get_count_ms(sample_trajs,delay,nstates)
connected_count_matrix = msm_estimation.connected_cmatrix(count_ms)
P = msm_estimation.tmatrix(connected_count_matrix)
sample_Ps.append(P)
return sample_Ps
def boostrap_tscales(labels,delay,dt,n_modes,n_samples = 1000,size=0,reversible=True):
Ps = get_bootstrapped_Ps(labels,delay,n_samples,size)
tscales=np.zeros((n_samples,n_modes))
for k,P in enumerate(Ps):
if reversible:
R = get_reversible_transition_matrix(P)
tscale = compute_tscales(R,delay,dt,k=n_modes+1)
else:
tscale = compute_tscales(P,delay,dt,k=n_modes+1)
tscales[k,:]=tscale
return tscales
def bootstrap_tscale_sample(labels,delay,dt,n_modes,size=0,reversible=True):
dtrajs = get_split_trajs(labels,size)
nstates = np.unique(labels.compressed()).shape[0]
sample_trajs = [dtrajs[k] for k in np.random.randint(0,len(dtrajs),len(dtrajs))]
count_ms = get_count_ms(sample_trajs,delay,nstates)
connected_count_matrix = msm_estimation.connected_cmatrix(count_ms)
P = msm_estimation.tmatrix(connected_count_matrix)
if reversible:
R = get_reversible_transition_matrix(P)
tscale = compute_tscales(R,delay,dt,k=n_modes+1)
else:
tscale = compute_tscales(P,delay,dt,k=n_modes+1)
return tscale
def bootstrap_tscales_delays(range_delays,labels,n_modes,dt,n_samples=1000,size=0,reversible=True):
dtrajs = get_split_trajs(labels,size)
nstates = np.unique(labels.compressed()).shape[0]
sample_trajs = [dtrajs[k] for k in np.random.randint(0,len(dtrajs),len(dtrajs))]
tscales=np.zeros((len(range_delays),n_modes))
for kd,delay in enumerate(range_delays):
try:
count_ms = get_count_ms(sample_trajs,delay,nstates)
connected_count_matrix = msm_estimation.connected_cmatrix(count_ms)
P = msm_estimation.tmatrix(connected_count_matrix)
if reversible:
R = get_reversible_transition_matrix(P)
tscale = compute_tscales(R,delay,dt,k=n_modes+1)
else:
tscale = compute_tscales(P,delay,dt,k=n_modes+1)
tscales[kd,:] = tscale
except:
continue
return tscales
def compute_implied_tscales(labels,range_delays,dt=1,n_modes=5,n_samples=1000,size=0,reversible=False,confidence = 95):
if size==0:
size = np.max(range_delays)*2
cil = (100-confidence)/2
ciu = 100-cil
cil_delay=np.zeros((len(range_delays),n_modes))
ciu_delay=np.zeros((len(range_delays),n_modes))
mean_delay=np.zeros((len(range_delays),n_modes))
bootstrapped_tscales = []
for kd,delay in enumerate(range_delays):
mean_tscale = implied_tscale(labels,size,delay,dt,n_modes,reversible)
tscales_samples = boostrap_tscales(labels,delay,dt,n_modes,n_samples,size,reversible)
mean_delay[kd,:] = mean_tscale
cil_delay[kd,:] = np.nanpercentile(tscales_samples,cil,axis=0)
ciu_delay[kd,:] = np.nanpercentile(tscales_samples,ciu,axis=0)
return cil_delay,ciu_delay,mean_delay
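# Illustrative sketch: typical use of compute_implied_tscales, plotting mean implied timescales
# with bootstrapped confidence bands as a function of lag. The delay grid and n_samples are
# hypothetical choices; plt is assumed to be matplotlib.pyplot, imported elsewhere in this module.
def example_tscale_convergence(labels,dt=1,n_modes=3):
    range_delays = np.arange(1,51,5)
    cil,ciu,mean_ts = compute_implied_tscales(labels,range_delays,dt=dt,n_modes=n_modes,n_samples=100)
    for mode in range(n_modes):
        plt.plot(range_delays*dt,mean_ts[:,mode])
        plt.fill_between(range_delays*dt,cil[:,mode],ciu[:,mode],alpha=.3)
    plt.xlabel('delay time')
    plt.ylabel('implied timescale')
    plt.show()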
def find_asymptote(ts,tol=1e-5):
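    # estimates the long-lag plateau of ts (e.g. an implied-timescale curve): the slope m of a
    # straight-line fit to cumsum(ts) over the tail approximates the mean of ts there (the
    # asymptotic value); the crossing index and the fit are iterated until the estimate changes
    # by less than tol, and the final crossing index and plateau estimate are returned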
es_0 = np.percentile(ts,80)
tau_0 = np.where(np.diff(np.sign(ts-es_0)))[0][0]
es_list=[es_0,]
tau_list=[tau_0,]
m,b=np.polyfit(np.arange(tau_0,len(ts)),np.cumsum(ts)[tau_list[-1]:],1)
es_list.append(m)
tau_list.append(np.where(np.diff(np.sign(ts-m)))[0][0])
while es_list[-1]-es_list[-2]>tol:
m,b=np.polyfit(np.arange(tau_list[-1],len(ts)),np.cumsum(ts)[tau_list[-1]:],1)
es_list.append(m)
try:
tau_list.append(np.where(np.diff(np.sign(ts-m)))[0][0])
except:
break
return tau_list[-1],es_list[-1]
def stationary_distribution(P):
probs = msm_analysis.statdist(P)
return probs
def get_entropy(labels):
    #entropy rate of the 1-step chain: h = -sum_i pi_i sum_j P_ij log(P_ij)
P = transition_matrix(labels,1)
probs = stationary_distribution(P)
logP = P.copy()
logP.data = np.log(logP.data)
return (-diags(probs).dot(P.multiply(logP))).sum()
def simulate(P,state0,iters):
'''
Monte Carlo simulation of the markov chain characterized by the matrix P
state0: initial system
iters: number of iterations of the simulation
'''
states = np.zeros(iters,dtype=int)
states[0]=state0
state=state0
for k in range(1,iters):
new_state = np.random.choice(np.arange(P.shape[1]),p=list(np.hstack(P[state,:].toarray())))
state=new_state
states[k]=state
return states
def state_lifetime(states,tau):
'''
Get distribution of lifetimes of each state in states
tau is the sampling time of the states
'''
durations=[]
for state in np.sort(np.unique(states.compressed())):
gaps = states==state
gaps_boundaries = np.where(np.abs(np.diff(np.concatenate([[False], gaps, [False]]))))[0].reshape(-1, 2)
durations.append(np.hstack(np.diff(gaps_boundaries))*tau)
return durations
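# Illustrative sketch: Monte Carlo consistency check of an estimated transition matrix, comparing
# state lifetimes of a simulated sequence with those of the empirical labels. state0=0 and tau=1.0
# are hypothetical choices (state 0 is assumed to exist after relabeling to the connected set).
def example_simulation_check(labels,iters=10000,tau=1.0):
    P = transition_matrix(labels,1)
    sim_states = ma.array(simulate(P,0,iters))
    return state_lifetime(sim_states,tau),state_lifetime(labels,tau)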
from scipy.signal import find_peaks
def optimal_partition(phi2,inv_measure,P,return_rho = True):
X = phi2
c_range = np.sort(phi2)[1:-1]
rho_c = np.zeros(len(c_range))
rho_sets = np.zeros((len(c_range),2))
for kc,c in enumerate(c_range):
labels = np.zeros(len(X),dtype=int)
labels[X<=c] = 1
rho_sets[kc] = [(inv_measure[labels==idx]*(P[labels==idx,:][:,labels==idx])).sum()/inv_measure[labels==idx].sum()
for idx in range(2)]
rho_c = np.min(rho_sets,axis=1)
peaks, heights = find_peaks(rho_c, height=0.5)
if len(peaks)==0: #lower height
print('No prominent coherent set')
return None
else:
idx = peaks[np.argmax(heights['peak_heights'])]
c_opt = c_range[idx]
kmeans_labels = np.zeros(len(X),dtype=int)
kmeans_labels[X<c_opt] = 1
if return_rho:
return c_range,rho_sets,idx,kmeans_labels
else:
return kmeans_labels
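# Illustrative sketch of the inputs expected by optimal_partition: phi2 is the second (slowest
# nontrivial) eigenvector of the reversibilized operator R, inv_measure is the stationary
# distribution of P, and the split is placed where the coherence measure rho of the two
# candidate sets peaks. The lag `delay` is a hypothetical choice.
def example_two_state_split(labels,delay=1):
    P = transition_matrix(labels,delay)
    inv_measure = stationary_distribution(P)
    R = get_reversible_transition_matrix(P)
    eigvals,eigvecs = sorted_spectrum(R,k=2)
    phi2 = eigvecs[:,1].real
    return optimal_partition(phi2,inv_measure,P,return_rho=False)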
def subdivide_state_optimal(phi2,kmeans_labels,inv_measure,P,indices,plot):
if plot:
c_range,rho_sets,idx,labels_ = optimal_partition(phi2,inv_measure,P,return_rho=True)
kmeans_labels[indices] = labels_+np.max(kmeans_labels)+1
plt.figure(figsize=(5,5))
plt.scatter(c_range,rho_sets[:,0],s=10)
plt.scatter(c_range,rho_sets[:,1],s=10)
rho_c = np.min(rho_sets,axis=1)
plt.plot(c_range,rho_c,c='k',ls='--')
plt.scatter(c_range[idx],rho_c[idx],c='r',marker='x')
plt.ylim(.2,1)
plt.xlabel(r'$\phi_2$',fontsize=15)
plt.ylabel(r'$\rho$',fontsize=15)
plt.xticks(fontsize=12)
print(len(np.unique(kmeans_labels)))
plt.show()
else:
kmeans_labels[indices] = optimal_partition(phi2,inv_measure,P,return_rho=False)+np.max(kmeans_labels)+1
final_kmeans_labels = np.zeros(kmeans_labels.shape,dtype=int)
for new_idx,label in enumerate(np.sort(np.unique(kmeans_labels))):
final_kmeans_labels[kmeans_labels==label]=new_idx
return final_kmeans_labels
def recursive_partitioning_optimal(final_labels,delay,phi2,inv_measure,P,n_final_states,plot=False,save=False):
c_range,rho_sets,idx,kmeans_labels = optimal_partition(phi2,inv_measure,P,return_rho=True)
if plot:
plt.figure(figsize=(5,5))
plt.scatter(c_range,rho_sets[:,0],s=10)
plt.scatter(c_range,rho_sets[:,1],s=10)
rho_c = np.min(rho_sets,axis=1)
plt.plot(c_range,rho_c,c='k',ls='--')
plt.scatter(c_range[idx],rho_c[idx],c='r',marker='x')
plt.ylim(.3,1)
# plt.xlim(-0.04,0.04)
plt.xlabel(r'$\phi_2$',fontsize=15)
plt.ylabel(r'$\rho$',fontsize=15)
plt.xticks(fontsize=12)
        print(len(np.unique(kmeans_labels)))
#!/usr/bin/env python
u"""
read_cryosat_L1b.py
Written by <NAME> (02/2020)
Reads CryoSat Level-1b data products from baselines A, B and C
Reads CryoSat Level-1b netCDF4 data products from baseline D
Supported CryoSat Modes: LRM, SAR, SARin, FDM, SID, GDR
INPUTS:
full_filename: full path of CryoSat .DBL or .nc file
OUTPUTS:
Location: Time and Orbit Group
Data: Measurements Group
Geometry: External Corrections Group
Waveform_1Hz: Average Waveforms Group
Waveform_20Hz: Waveforms Group (with SAR/SARIN Beam Behavior Parameters)
METADATA: MPH, SPH and DSD Header data
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
netCDF4: Python interface to the netCDF C library
https://unidata.github.io/netcdf4-python/netCDF4/index.html
UPDATE HISTORY:
Updated 02/2020: tilde-expansion of cryosat-2 files before opening
add scale factors function for converting packed units in binary files
convert from hard to soft tabulation
Updated 11/2019: empty placeholder dictionary for baseline D DSD headers
Updated 09/2019: added netCDF4 read function for baseline D
will output with same variable names as the binary read functions
Updated 04/2019: USO correction signed 32 bit int
Updated 10/2018: updated header read functions for python3
Updated 05/2016: using __future__ print and division functions
Written 03/2016
"""
from __future__ import print_function
from __future__ import division
import os
import re
import netCDF4
import numpy as np
#-- PURPOSE: Initiate L1b MDS variables for CryoSat Baselines A and B
def cryosat_baseline_AB(fid, n_records, MODE):
n_SARIN_RW = 512
n_SAR_RW = 128
n_LRM_RW = 128
n_blocks = 20
n_BeamBehaviourParams = 50
#-- CryoSat-2 Time and Orbit Group
Location = {}
#-- Time: day part
Location['Day'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Time: second part
Location['Second'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- Time: microsecond part
Location['Micsec'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- USO correction factor
Location['USO_Corr'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Mode ID
Location['Mode_ID'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Source sequence counter
Location['SSC'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Instrument configuration
Location['Inst_config'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- Record Counter
Location['Rec_Count'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Location['Lat'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Location['Lon'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Location['Alt'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instantaneous altitude rate derived from orbit: packed units (mm/s, 1e-3 m/s)
Location['Alt_rate'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Satellite velocity vector. In ITRF: packed units (mm/s, 1e-3 m/s)
#-- ITRF= International Terrestrial Reference Frame
Location['Sat_velocity'] = np.zeros((n_records,n_blocks,3),dtype=np.int32)
#-- Real beam direction vector. In CRF: packed units (micro-m, 1e-6 m)
#-- CRF= CryoSat Reference Frame.
Location['Real_beam'] = np.zeros((n_records,n_blocks,3),dtype=np.int32)
#-- Interferometric baseline vector. In CRF: packed units (micro-m, 1e-6 m)
Location['Baseline'] = np.zeros((n_records,n_blocks,3),dtype=np.int32)
#-- Measurement Confidence Data Flags
#-- Generally the MCD flags indicate problems when set
#-- If MCD is 0 then no problems or non-nominal conditions were detected
#-- Serious errors are indicated by setting bit 31
Location['MCD'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- CryoSat-2 Measurement Group
#-- Derived from instrument measurement parameters
Data_20Hz = {}
#-- Window Delay reference (two-way) corrected for instrument delays
Data_20Hz['TD'] = np.zeros((n_records,n_blocks),dtype=np.int64)
#-- H0 Initial Height Word from telemetry
Data_20Hz['H_0'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- COR2 Height Rate: on-board tracker height rate over the radar cycle
Data_20Hz['COR2'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Coarse Range Word (LAI) derived from telemetry
Data_20Hz['LAI'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Fine Range Word (FAI) derived from telemetry
Data_20Hz['FAI'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Automatic Gain Control Channel 1: AGC gain applied on Rx channel 1.
#-- Gain calibration corrections are applied (Sum of AGC stages 1 and 2
#-- plus the corresponding corrections) (dB/100)
Data_20Hz['AGC_CH1'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Automatic Gain Control Channel 2: AGC gain applied on Rx channel 2.
#-- Gain calibration corrections are applied (dB/100)
Data_20Hz['AGC_CH2'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Total Fixed Gain On Channel 1: gain applied by the RF unit. (dB/100)
Data_20Hz['TR_gain_CH1'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Total Fixed Gain On Channel 2: gain applied by the RF unit. (dB/100)
Data_20Hz['TR_gain_CH2'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Transmit Power in microWatts
Data_20Hz['TX_Power'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Doppler range correction: Radial component (mm)
#-- computed for the component of satellite velocity in the nadir direction
Data_20Hz['Doppler_range'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instrument Range Correction: transmit-receive antenna (mm)
#-- Calibration correction to range on channel 1 computed from CAL1.
Data_20Hz['TR_inst_range'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instrument Range Correction: receive-only antenna (mm)
#-- Calibration correction to range on channel 2 computed from CAL1.
Data_20Hz['R_inst_range'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instrument Gain Correction: transmit-receive antenna (dB/100)
#-- Calibration correction to gain on channel 1 computed from CAL1
Data_20Hz['TR_inst_gain'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instrument Gain Correction: receive-only (dB/100)
#-- Calibration correction to gain on channel 2 computed from CAL1
Data_20Hz['R_inst_gain'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Internal Phase Correction (microradians)
Data_20Hz['Internal_phase'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- External Phase Correction (microradians)
Data_20Hz['External_phase'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Noise Power measurement (dB/100): converted from telemetry units to be
#-- the noise floor of FBR measurement echoes.
#-- Set to -9999.99 when the telemetry contains zero.
Data_20Hz['Noise_power'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Phase slope correction (microradians)
#-- Computed from the CAL-4 packets during the azimuth impulse response
#-- amplitude (SARIN only). Set from the latest available CAL-4 packet.
Data_20Hz['Phase_slope'] = np.zeros((n_records,n_blocks),dtype=np.int32)
Data_20Hz['Spares1'] = np.zeros((n_records,n_blocks,4),dtype=np.int8)
#-- CryoSat-2 External Corrections Group
Geometry = {}
#-- Dry Tropospheric Correction packed units (mm, 1e-3 m)
Geometry['dryTrop'] = np.zeros((n_records),dtype=np.int32)
#-- Wet Tropospheric Correction packed units (mm, 1e-3 m)
Geometry['wetTrop'] = np.zeros((n_records),dtype=np.int32)
#-- Inverse Barometric Correction packed units (mm, 1e-3 m)
Geometry['InvBar'] = np.zeros((n_records),dtype=np.int32)
#-- Delta Inverse Barometric Correction packed units (mm, 1e-3 m)
Geometry['DAC'] = np.zeros((n_records),dtype=np.int32)
#-- GIM Ionospheric Correction packed units (mm, 1e-3 m)
Geometry['Iono_GIM'] = np.zeros((n_records),dtype=np.int32)
#-- Model Ionospheric Correction packed units (mm, 1e-3 m)
Geometry['Iono_model'] = np.zeros((n_records),dtype=np.int32)
#-- Ocean tide Correction packed units (mm, 1e-3 m)
Geometry['ocTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Long period equilibrium ocean tide Correction packed units (mm, 1e-3 m)
Geometry['lpeTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Ocean loading tide Correction packed units (mm, 1e-3 m)
Geometry['olTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Solid Earth tide Correction packed units (mm, 1e-3 m)
Geometry['seTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Geocentric Polar tide Correction packed units (mm, 1e-3 m)
Geometry['gpTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Surface Type: enumerated key to classify surface at nadir
#-- 0 = Open Ocean
#-- 1 = Closed Sea
#-- 2 = Continental Ice
#-- 3 = Land
Geometry['Surf_type'] = np.zeros((n_records),dtype=np.uint32)
Geometry['Spare1'] = np.zeros((n_records,4),dtype=np.int8)
#-- Corrections Status Flag
Geometry['Corr_status'] = np.zeros((n_records),dtype=np.uint32)
#-- Correction Error Flag
Geometry['Corr_error'] = np.zeros((n_records),dtype=np.uint32)
Geometry['Spare2'] = np.zeros((n_records,4),dtype=np.int8)
#-- CryoSat-2 Average Waveforms Groups
Waveform_1Hz = {}
if (MODE == 'LRM'):
#-- Low-Resolution Mode
#-- Data Record Time (MDSR Time Stamp)
Waveform_1Hz['Day'] = np.zeros((n_records),dtype=np.int32)
Waveform_1Hz['Second'] = np.zeros((n_records),dtype=np.uint32)
Waveform_1Hz['Micsec'] = np.zeros((n_records),dtype=np.uint32)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lat'] = np.zeros((n_records),dtype=np.int32)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lon'] = np.zeros((n_records),dtype=np.int32)
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Waveform_1Hz['Alt'] = np.zeros((n_records),dtype=np.int32)
#-- Window Delay (two-way) corrected for instrument delays
Waveform_1Hz['TD'] = np.zeros((n_records),dtype=np.int64)
#-- 1 Hz Averaged Power Echo Waveform
Waveform_1Hz['Waveform'] = np.zeros((n_records,n_LRM_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_1Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Echo Scale Power (a power of 2)
Waveform_1Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Number of echoes averaged
Waveform_1Hz['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
Waveform_1Hz['Flags'] = np.zeros((n_records),dtype=np.uint16)
elif (MODE == 'SAR'):
#-- SAR Mode
#-- Data Record Time (MDSR Time Stamp)
Waveform_1Hz['Day'] = np.zeros((n_records),dtype=np.int32)
Waveform_1Hz['Second'] = np.zeros((n_records),dtype=np.uint32)
Waveform_1Hz['Micsec'] = np.zeros((n_records),dtype=np.uint32)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lat'] = np.zeros((n_records),dtype=np.int32)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lon'] = np.zeros((n_records),dtype=np.int32)
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Waveform_1Hz['Alt'] = np.zeros((n_records),dtype=np.int32)
#-- Window Delay (two-way) corrected for instrument delays
Waveform_1Hz['TD'] = np.zeros((n_records),dtype=np.int64)
#-- 1 Hz Averaged Power Echo Waveform
Waveform_1Hz['Waveform'] = np.zeros((n_records,n_SAR_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_1Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Echo Scale Power (a power of 2)
Waveform_1Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Number of echoes averaged
Waveform_1Hz['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
Waveform_1Hz['Flags'] = np.zeros((n_records),dtype=np.uint16)
elif (MODE == 'SIN'):
#-- SARIN Mode
#-- Same as the LRM/SAR groups but the waveform array is 512 bins instead of
#-- 128 and the number of echoes averaged is different.
#-- Data Record Time (MDSR Time Stamp)
Waveform_1Hz['Day'] = np.zeros((n_records),dtype=np.int32)
Waveform_1Hz['Second'] = np.zeros((n_records),dtype=np.uint32)
Waveform_1Hz['Micsec'] = np.zeros((n_records),dtype=np.uint32)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lat'] = np.zeros((n_records),dtype=np.int32)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lon'] = np.zeros((n_records),dtype=np.int32)
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Waveform_1Hz['Alt'] = np.zeros((n_records),dtype=np.int32)
#-- Window Delay (two-way) corrected for instrument delays
Waveform_1Hz['TD'] = np.zeros((n_records),dtype=np.int64)
#-- 1 Hz Averaged Power Echo Waveform
Waveform_1Hz['Waveform'] = np.zeros((n_records,n_SARIN_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_1Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Echo Scale Power (a power of 2)
Waveform_1Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Number of echoes averaged
Waveform_1Hz['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
Waveform_1Hz['Flags'] = np.zeros((n_records),dtype=np.uint16)
#-- CryoSat-2 Waveforms Groups
#-- Beam Behavior Parameters
Beam_Behavior = {}
#-- Standard Deviation of Gaussian fit to range integrated stack power.
Beam_Behavior['SD'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Stack Center: Mean of Gaussian fit to range integrated stack power.
Beam_Behavior['Center'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Stack amplitude parameter scaled in dB/100.
Beam_Behavior['Amplitude'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- 3rd moment: providing the degree of asymmetry of the range integrated
#-- stack power distribution.
Beam_Behavior['Skewness'] = np.zeros((n_records,n_blocks),dtype=np.int16)
#-- 4th moment: Measure of peakiness of range integrated stack power distribution.
Beam_Behavior['Kurtosis'] = np.zeros((n_records,n_blocks),dtype=np.int16)
Beam_Behavior['Spare'] = np.zeros((n_records,n_blocks,n_BeamBehaviourParams-5),dtype=np.int16)
#-- CryoSat-2 mode specific waveforms
Waveform_20Hz = {}
if (MODE == 'LRM'):
#-- Low-Resolution Mode
#-- Averaged Power Echo Waveform [128]
Waveform_20Hz['Waveform'] = np.zeros((n_records,n_blocks,n_LRM_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_20Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Echo Scale Power (a power of 2 to scale echo to Watts)
Waveform_20Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Number of echoes averaged
Waveform_20Hz['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
Waveform_20Hz['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
elif (MODE == 'SAR'):
#-- SAR Mode
#-- Averaged Power Echo Waveform [128]
Waveform_20Hz['Waveform'] = np.zeros((n_records,n_blocks,n_SAR_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_20Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Echo Scale Power (a power of 2 to scale echo to Watts)
Waveform_20Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Number of echoes averaged
Waveform_20Hz['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
Waveform_20Hz['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Beam behaviour parameters
Waveform_20Hz['Beam'] = Beam_Behavior
elif (MODE == 'SIN'):
#-- SARIN Mode
#-- Averaged Power Echo Waveform [512]
Waveform_20Hz['Waveform'] = np.zeros((n_records,n_blocks,n_SARIN_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_20Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Echo Scale Power (a power of 2 to scale echo to Watts)
Waveform_20Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Number of echoes averaged
Waveform_20Hz['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
Waveform_20Hz['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Beam behaviour parameters
Waveform_20Hz['Beam'] = Beam_Behavior
#-- Coherence [512]: packed units (1/1000)
Waveform_20Hz['Coherence'] = np.zeros((n_records,n_blocks,n_SARIN_RW),dtype=np.int16)
#-- Phase Difference [512]: packed units (microradians)
Waveform_20Hz['Phase_diff'] = np.zeros((n_records,n_blocks,n_SARIN_RW),dtype=np.int32)
#-- for each record in the CryoSat file
for r in range(n_records):
#-- CryoSat-2 Time and Orbit Group
for b in range(n_blocks):
Location['Day'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Second'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['Micsec'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['USO_Corr'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Mode_ID'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Location['SSC'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Location['Inst_config'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['Rec_Count'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['Lat'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Lon'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Alt'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Alt_rate'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Sat_velocity'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
Location['Real_beam'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
Location['Baseline'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
Location['MCD'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
#-- CryoSat-2 Measurement Group
#-- Derived from instrument measurement parameters
for b in range(n_blocks):
Data_20Hz['TD'][r,b] = np.fromfile(fid,dtype='>i8',count=1)
Data_20Hz['H_0'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['COR2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['LAI'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['FAI'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['AGC_CH1'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['AGC_CH2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TR_gain_CH1'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TR_gain_CH2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TX_Power'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Doppler_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TR_inst_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['R_inst_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TR_inst_gain'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['R_inst_gain'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Internal_phase'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['External_phase'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Noise_power'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Phase_slope'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Spares1'][r,b,:] = np.fromfile(fid,dtype='>i1',count=4)
#-- CryoSat-2 External Corrections Group
Geometry['dryTrop'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['wetTrop'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['InvBar'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['DAC'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['Iono_GIM'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['Iono_model'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['ocTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['lpeTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['olTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['seTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['gpTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['Surf_type'][r] = np.fromfile(fid,dtype='>u4',count=1)
Geometry['Spare1'][r,:] = np.fromfile(fid,dtype='>i1',count=4)
Geometry['Corr_status'][r] = np.fromfile(fid,dtype='>u4',count=1)
Geometry['Corr_error'][r] = np.fromfile(fid,dtype='>u4',count=1)
Geometry['Spare2'][r,:] = np.fromfile(fid,dtype='>i1',count=4)
#-- CryoSat-2 Average Waveforms Groups
if (MODE == 'LRM'):
#-- Low-Resolution Mode
Waveform_1Hz['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
Waveform_1Hz['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_LRM_RW)
Waveform_1Hz['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_1Hz['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
elif (MODE == 'SAR'):
#-- SAR Mode
Waveform_1Hz['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
Waveform_1Hz['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_SAR_RW)
Waveform_1Hz['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_1Hz['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
elif (MODE == 'SIN'):
#-- SARIN Mode
Waveform_1Hz['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
Waveform_1Hz['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_SARIN_RW)
Waveform_1Hz['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_1Hz['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
#-- CryoSat-2 Waveforms Groups
if (MODE == 'LRM'):
#-- Low-Resolution Mode
for b in range(n_blocks):
Waveform_20Hz['Waveform'][r,b,:] = np.fromfile(fid,dtype='>u2',count=n_LRM_RW)
Waveform_20Hz['Linear_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['Power2_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['N_avg_echoes'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Flags'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
elif (MODE == 'SAR'):
#-- SAR Mode
for b in range(n_blocks):
Waveform_20Hz['Waveform'][r,b,:] = np.fromfile(fid,dtype='>u2',count=n_SAR_RW)
Waveform_20Hz['Linear_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['Power2_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['N_avg_echoes'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Flags'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['SD'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Center'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Amplitude'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Skewness'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Kurtosis'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Spare'][r,b,:] = np.fromfile(fid,dtype='>i2',count=(n_BeamBehaviourParams-5))
elif (MODE == 'SIN'):
#-- SARIN Mode
for b in range(n_blocks):
Waveform_20Hz['Waveform'][r,b,:] = np.fromfile(fid,dtype='>u2',count=n_SARIN_RW)
Waveform_20Hz['Linear_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['Power2_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['N_avg_echoes'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Flags'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['SD'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Center'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Amplitude'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Skewness'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Kurtosis'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Spare'][r,b,:] = np.fromfile(fid,dtype='>i2',count=(n_BeamBehaviourParams-5))
Waveform_20Hz['Coherence'][r,b,:] = np.fromfile(fid,dtype='>i2',count=n_SARIN_RW)
Waveform_20Hz['Phase_diff'][r,b,:] = np.fromfile(fid,dtype='>i4',count=n_SARIN_RW)
#-- Bind all the bits of the l1b_mds together into a single dictionary
CS_l1b_mds = {}
CS_l1b_mds['Location'] = Location
CS_l1b_mds['Data'] = Data_20Hz
CS_l1b_mds['Geometry'] = Geometry
CS_l1b_mds['Waveform_1Hz'] = Waveform_1Hz
CS_l1b_mds['Waveform_20Hz'] = Waveform_20Hz
#-- return the output dictionary
return CS_l1b_mds
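#-- Illustrative sketch (not part of the original module): driving the baseline A/B reader above.
#-- In the full module the MPH/SPH/DSD headers are parsed first to obtain the header size, the
#-- number of records and the instrument mode; here header_size, n_records and MODE are
#-- hypothetical placeholders that a caller would supply from those headers.
def example_read_baseline_AB(full_filename, header_size, n_records, MODE='SIN'):
    with open(os.path.expanduser(full_filename), 'rb') as fid:
        #-- skip the ASCII MPH/SPH headers to reach the binary data set records
        fid.seek(header_size)
        CS_l1b_mds = cryosat_baseline_AB(fid, n_records, MODE)
    return CS_l1b_mds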
#-- PURPOSE: Initiate L1b MDS variables for CryoSat Baseline C
def cryosat_baseline_C(fid, n_records, MODE):
n_SARIN_BC_RW = 1024
n_SARIN_RW = 512
n_SAR_BC_RW = 256
n_SAR_RW = 128
n_LRM_RW = 128
n_blocks = 20
n_BeamBehaviourParams = 50
#-- CryoSat-2 Time and Orbit Group
Location = {}
#-- Time: day part
Location['Day'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Time: second part
Location['Second'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- Time: microsecond part
Location['Micsec'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- USO correction factor
Location['USO_Corr'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Mode ID
Location['Mode_ID'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Source sequence counter
Location['SSC'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Instrument configuration
Location['Inst_config'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- Record Counter
Location['Rec_Count'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Location['Lat'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Location['Lon'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Location['Alt'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instantaneous altitude rate derived from orbit: packed units (mm/s, 1e-3 m/s)
Location['Alt_rate'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Satellite velocity vector. In ITRF: packed units (mm/s, 1e-3 m/s)
#-- ITRF= International Terrestrial Reference Frame
Location['Sat_velocity'] = np.zeros((n_records,n_blocks,3),dtype=np.int32)
    #-- Real beam direction vector. In CRF: packed units (micro-m, 1e-6 m)
#-- CRF= CryoSat Reference Frame.
Location['Real_beam'] = np.zeros((n_records,n_blocks,3),dtype=np.int32)
    #-- Interferometric baseline vector. In CRF: packed units (micro-m, 1e-6 m)
Location['Baseline'] = np.zeros((n_records,n_blocks,3),dtype=np.int32)
#-- Star Tracker ID
Location['ST_ID'] = np.zeros((n_records,n_blocks),dtype=np.int16)
#-- Antenna Bench Roll Angle (Derived from star trackers)
#-- packed units (0.1 micro-degree, 1e-7 degrees)
Location['Roll'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Antenna Bench Pitch Angle (Derived from star trackers)
#-- packed units (0.1 micro-degree, 1e-7 degrees)
Location['Pitch'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Antenna Bench Yaw Angle (Derived from star trackers)
#-- packed units (0.1 micro-degree, 1e-7 degrees)
Location['Yaw'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Measurement Confidence Data Flags
#-- Generally the MCD flags indicate problems when set
#-- If MCD is 0 then no problems or non-nominal conditions were detected
#-- Serious errors are indicated by setting bit 31
Location['MCD'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
Location['Spares'] = np.zeros((n_records,n_blocks,2),dtype=np.int16)
#-- CryoSat-2 Measurement Group
#-- Derived from instrument measurement parameters
Data_20Hz = {}
#-- Window Delay reference (two-way) corrected for instrument delays
Data_20Hz['TD'] = np.zeros((n_records,n_blocks),dtype=np.int64)
#-- H0 Initial Height Word from telemetry
Data_20Hz['H_0'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- COR2 Height Rate: on-board tracker height rate over the radar cycle
Data_20Hz['COR2'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Coarse Range Word (LAI) derived from telemetry
Data_20Hz['LAI'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Fine Range Word (FAI) derived from telemetry
Data_20Hz['FAI'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Automatic Gain Control Channel 1: AGC gain applied on Rx channel 1.
#-- Gain calibration corrections are applied (Sum of AGC stages 1 and 2
#-- plus the corresponding corrections) (dB/100)
Data_20Hz['AGC_CH1'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Automatic Gain Control Channel 2: AGC gain applied on Rx channel 2.
#-- Gain calibration corrections are applied (dB/100)
Data_20Hz['AGC_CH2'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Total Fixed Gain On Channel 1: gain applied by the RF unit. (dB/100)
Data_20Hz['TR_gain_CH1'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Total Fixed Gain On Channel 2: gain applied by the RF unit. (dB/100)
Data_20Hz['TR_gain_CH2'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Transmit Power in microWatts
Data_20Hz['TX_Power'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Doppler range correction: Radial component (mm)
#-- computed for the component of satellite velocity in the nadir direction
Data_20Hz['Doppler_range'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instrument Range Correction: transmit-receive antenna (mm)
#-- Calibration correction to range on channel 1 computed from CAL1.
Data_20Hz['TR_inst_range'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instrument Range Correction: receive-only antenna (mm)
#-- Calibration correction to range on channel 2 computed from CAL1.
Data_20Hz['R_inst_range'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instrument Gain Correction: transmit-receive antenna (dB/100)
#-- Calibration correction to gain on channel 1 computed from CAL1
Data_20Hz['TR_inst_gain'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instrument Gain Correction: receive-only (dB/100)
#-- Calibration correction to gain on channel 2 computed from CAL1
Data_20Hz['R_inst_gain'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Internal Phase Correction (microradians)
Data_20Hz['Internal_phase'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- External Phase Correction (microradians)
Data_20Hz['External_phase'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Noise Power measurement (dB/100)
Data_20Hz['Noise_power'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Phase slope correction (microradians)
#-- Computed from the CAL-4 packets during the azimuth impulse response
#-- amplitude (SARIN only). Set from the latest available CAL-4 packet.
Data_20Hz['Phase_slope'] = np.zeros((n_records,n_blocks),dtype=np.int32)
Data_20Hz['Spares1'] = np.zeros((n_records,n_blocks,4),dtype=np.int8)
#-- CryoSat-2 External Corrections Group
Geometry = {}
#-- Dry Tropospheric Correction packed units (mm, 1e-3 m)
Geometry['dryTrop'] = np.zeros((n_records),dtype=np.int32)
#-- Wet Tropospheric Correction packed units (mm, 1e-3 m)
Geometry['wetTrop'] = np.zeros((n_records),dtype=np.int32)
#-- Inverse Barometric Correction packed units (mm, 1e-3 m)
Geometry['InvBar'] = np.zeros((n_records),dtype=np.int32)
#-- Delta Inverse Barometric Correction packed units (mm, 1e-3 m)
Geometry['DAC'] = np.zeros((n_records),dtype=np.int32)
#-- GIM Ionospheric Correction packed units (mm, 1e-3 m)
Geometry['Iono_GIM'] = np.zeros((n_records),dtype=np.int32)
#-- Model Ionospheric Correction packed units (mm, 1e-3 m)
Geometry['Iono_model'] = np.zeros((n_records),dtype=np.int32)
#-- Ocean tide Correction packed units (mm, 1e-3 m)
Geometry['ocTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Long period equilibrium ocean tide Correction packed units (mm, 1e-3 m)
Geometry['lpeTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Ocean loading tide Correction packed units (mm, 1e-3 m)
Geometry['olTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Solid Earth tide Correction packed units (mm, 1e-3 m)
Geometry['seTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Geocentric Polar tide Correction packed units (mm, 1e-3 m)
Geometry['gpTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Surface Type: enumerated key to classify surface at nadir
#-- 0 = Open Ocean
#-- 1 = Closed Sea
#-- 2 = Continental Ice
#-- 3 = Land
Geometry['Surf_type'] = np.zeros((n_records),dtype=np.uint32)
Geometry['Spare1'] = np.zeros((n_records,4),dtype=np.int8)
#-- Corrections Status Flag
Geometry['Corr_status'] = np.zeros((n_records),dtype=np.uint32)
#-- Correction Error Flag
Geometry['Corr_error'] = np.zeros((n_records),dtype=np.uint32)
Geometry['Spare2'] = np.zeros((n_records,4),dtype=np.int8)
#-- CryoSat-2 Average Waveforms Groups
Waveform_1Hz = {}
if (MODE == 'LRM'):
#-- Low-Resolution Mode
#-- Data Record Time (MDSR Time Stamp)
Waveform_1Hz['Day'] = np.zeros((n_records),dtype=np.int32)
Waveform_1Hz['Second'] = np.zeros((n_records),dtype=np.uint32)
Waveform_1Hz['Micsec'] = np.zeros((n_records),dtype=np.uint32)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lat'] = np.zeros((n_records),dtype=np.int32)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lon'] = np.zeros((n_records),dtype=np.int32)
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Waveform_1Hz['Alt'] = np.zeros((n_records),dtype=np.int32)
#-- Window Delay (two-way) corrected for instrument delays
Waveform_1Hz['TD'] = np.zeros((n_records),dtype=np.int64)
#-- 1 Hz Averaged Power Echo Waveform
Waveform_1Hz['Waveform'] = np.zeros((n_records,n_LRM_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_1Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Echo Scale Power (a power of 2 to scale echo to Watts)
Waveform_1Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Number of echoes averaged
Waveform_1Hz['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
Waveform_1Hz['Flags'] = np.zeros((n_records),dtype=np.uint16)
elif (MODE == 'SAR'):
#-- SAR Mode
#-- Data Record Time (MDSR Time Stamp)
Waveform_1Hz['Day'] = np.zeros((n_records),dtype=np.int32)
Waveform_1Hz['Second'] = np.zeros((n_records),dtype=np.uint32)
Waveform_1Hz['Micsec'] = np.zeros((n_records),dtype=np.uint32)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lat'] = np.zeros((n_records),dtype=np.int32)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lon'] = np.zeros((n_records),dtype=np.int32)
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Waveform_1Hz['Alt'] = np.zeros((n_records),dtype=np.int32)
#-- Window Delay (two-way) corrected for instrument delays
Waveform_1Hz['TD'] = np.zeros((n_records),dtype=np.int64)
#-- 1 Hz Averaged Power Echo Waveform
Waveform_1Hz['Waveform'] = np.zeros((n_records,n_SAR_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_1Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Echo Scale Power (a power of 2 to scale echo to Watts)
Waveform_1Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Number of echoes averaged
Waveform_1Hz['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
Waveform_1Hz['Flags'] = np.zeros((n_records),dtype=np.uint16)
elif (MODE == 'SIN'):
#-- SARIN Mode
#-- Same as the LRM/SAR groups but the waveform array is 512 bins instead of
#-- 128 and the number of echoes averaged is different.
#-- Data Record Time (MDSR Time Stamp)
Waveform_1Hz['Day'] = np.zeros((n_records),dtype=np.int32)
Waveform_1Hz['Second'] = np.zeros((n_records),dtype=np.uint32)
Waveform_1Hz['Micsec'] = np.zeros((n_records),dtype=np.uint32)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lat'] = np.zeros((n_records),dtype=np.int32)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lon'] = np.zeros((n_records),dtype=np.int32)
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Waveform_1Hz['Alt'] = np.zeros((n_records),dtype=np.int32)
#-- Window Delay (two-way) corrected for instrument delays
Waveform_1Hz['TD'] = np.zeros((n_records),dtype=np.int64)
#-- 1 Hz Averaged Power Echo Waveform
Waveform_1Hz['Waveform'] = np.zeros((n_records,n_SARIN_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_1Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Echo Scale Power (a power of 2 to scale echo to Watts)
Waveform_1Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Number of echoes averaged
Waveform_1Hz['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
Waveform_1Hz['Flags'] = np.zeros((n_records),dtype=np.uint16)
#-- CryoSat-2 Waveforms Groups
#-- Beam Behavior Parameters
Beam_Behavior = {}
#-- Standard Deviation of Gaussian fit to range integrated stack power.
Beam_Behavior['SD'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Stack Center: Mean of Gaussian fit to range integrated stack power.
Beam_Behavior['Center'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Stack amplitude parameter scaled in dB/100.
Beam_Behavior['Amplitude'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- 3rd moment: providing the degree of asymmetry of the range integrated
#-- stack power distribution.
Beam_Behavior['Skewness'] = np.zeros((n_records,n_blocks),dtype=np.int16)
#-- 4th moment: Measure of peakiness of range integrated stack power distribution.
Beam_Behavior['Kurtosis'] = np.zeros((n_records,n_blocks),dtype=np.int16)
#-- Standard deviation as a function of boresight angle (microradians)
Beam_Behavior['SD_boresight_angle'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Stack Center angle as a function of boresight angle (microradians)
Beam_Behavior['Center_boresight_angle'] = np.zeros((n_records,n_blocks),dtype=np.int16)
Beam_Behavior['Spare'] = np.zeros((n_records,n_blocks,n_BeamBehaviourParams-7),dtype=np.int16)
#-- CryoSat-2 mode specific waveform variables
Waveform_20Hz = {}
if (MODE == 'LRM'):
#-- Low-Resolution Mode
#-- Averaged Power Echo Waveform [128]
Waveform_20Hz['Waveform'] = np.zeros((n_records,n_blocks,n_LRM_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_20Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Echo Scale Power (a power of 2)
Waveform_20Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Number of echoes averaged
Waveform_20Hz['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
Waveform_20Hz['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
elif (MODE == 'SAR'):
#-- SAR Mode
#-- Averaged Power Echo Waveform [256]
Waveform_20Hz['Waveform'] = np.zeros((n_records,n_blocks,n_SAR_BC_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_20Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Echo Scale Power (a power of 2)
Waveform_20Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Number of echoes averaged
Waveform_20Hz['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
Waveform_20Hz['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Beam behaviour parameters
Waveform_20Hz['Beam'] = Beam_Behavior
elif (MODE == 'SIN'):
#-- SARIN Mode
#-- Averaged Power Echo Waveform [1024]
Waveform_20Hz['Waveform'] = np.zeros((n_records,n_blocks,n_SARIN_BC_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_20Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Echo Scale Power (a power of 2)
Waveform_20Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Number of echoes averaged
Waveform_20Hz['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
Waveform_20Hz['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Beam behaviour parameters
Waveform_20Hz['Beam'] = Beam_Behavior
#-- Coherence [1024]: packed units (1/1000)
Waveform_20Hz['Coherence'] = np.zeros((n_records,n_blocks,n_SARIN_BC_RW),dtype=np.int16)
#-- Phase Difference [1024]: packed units (microradians)
Waveform_20Hz['Phase_diff'] = np.zeros((n_records,n_blocks,n_SARIN_BC_RW),dtype=np.int32)
#-- for each record in the CryoSat file
for r in range(n_records):
#-- CryoSat-2 Time and Orbit Group
for b in range(n_blocks):
Location['Day'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Second'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['Micsec'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['USO_Corr'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Mode_ID'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Location['SSC'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Location['Inst_config'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['Rec_Count'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['Lat'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Lon'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Alt'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Alt_rate'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Sat_velocity'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
Location['Real_beam'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
Location['Baseline'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
Location['ST_ID'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Location['Roll'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Pitch'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Yaw'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['MCD'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['Spares'][r,b,:] = np.fromfile(fid,dtype='>i2',count=2)
#-- CryoSat-2 Measurement Group
#-- Derived from instrument measurement parameters
for b in range(n_blocks):
Data_20Hz['TD'][r,b] = np.fromfile(fid,dtype='>i8',count=1)
Data_20Hz['H_0'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['COR2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['LAI'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['FAI'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['AGC_CH1'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['AGC_CH2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TR_gain_CH1'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TR_gain_CH2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TX_Power'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Doppler_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TR_inst_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['R_inst_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TR_inst_gain'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['R_inst_gain'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Internal_phase'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['External_phase'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Noise_power'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Phase_slope'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Spares1'][r,b,:] = np.fromfile(fid,dtype='>i1',count=4)
#-- CryoSat-2 External Corrections Group
Geometry['dryTrop'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['wetTrop'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['InvBar'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['DAC'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['Iono_GIM'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['Iono_model'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['ocTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['lpeTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['olTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['seTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['gpTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['Surf_type'][r] = np.fromfile(fid,dtype='>u4',count=1)
Geometry['Spare1'][r,:] = np.fromfile(fid,dtype='>i1',count=4)
Geometry['Corr_status'][r] = np.fromfile(fid,dtype='>u4',count=1)
Geometry['Corr_error'][r] = np.fromfile(fid,dtype='>u4',count=1)
Geometry['Spare2'][r,:] = np.fromfile(fid,dtype='>i1',count=4)
#-- CryoSat-2 Average Waveforms Groups
if (MODE == 'LRM'):
#-- Low-Resolution Mode
Waveform_1Hz['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
Waveform_1Hz['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_LRM_RW)
Waveform_1Hz['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_1Hz['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
elif (MODE == 'SAR'):
#-- SAR Mode
Waveform_1Hz['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
Waveform_1Hz['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_SAR_RW)
Waveform_1Hz['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_1Hz['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
elif (MODE == 'SIN'):
#-- SARIN Mode
Waveform_1Hz['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
Waveform_1Hz['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_SARIN_RW)
Waveform_1Hz['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_1Hz['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
#-- CryoSat-2 Waveforms Groups
if (MODE == 'LRM'):
#-- Low-Resolution Mode
for b in range(n_blocks):
Waveform_20Hz['Waveform'][r,b,:] = np.fromfile(fid,dtype='>u2',count=n_LRM_RW)
Waveform_20Hz['Linear_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['Power2_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['N_avg_echoes'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Flags'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
elif (MODE == 'SAR'):
#-- SAR Mode
for b in range(n_blocks):
Waveform_20Hz['Waveform'][r,b,:] = np.fromfile(fid,dtype='>u2',count=n_SAR_BC_RW)
Waveform_20Hz['Linear_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['Power2_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['N_avg_echoes'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Flags'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['SD'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Center'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Amplitude'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Skewness'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Kurtosis'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['SD_boresight_angle'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Center_boresight_angle'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Spare'][r,b,:] = np.fromfile(fid,dtype='>i2',count=(n_BeamBehaviourParams-7))
elif (MODE == 'SIN'):
#-- SARIN Mode
for b in range(n_blocks):
Waveform_20Hz['Waveform'][r,b,:] = np.fromfile(fid,dtype='>u2',count=n_SARIN_BC_RW)
Waveform_20Hz['Linear_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['Power2_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['N_avg_echoes'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Flags'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['SD'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Center'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Amplitude'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Skewness'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Kurtosis'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['SD_boresight_angle'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Center_boresight_angle'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Spare'][r,b,:] = np.fromfile(fid,dtype='>i2',count=(n_BeamBehaviourParams-7))
Waveform_20Hz['Coherence'][r,b,:] = np.fromfile(fid,dtype='>i2',count=n_SARIN_BC_RW)
Waveform_20Hz['Phase_diff'][r,b,:] = np.fromfile(fid,dtype='>i4',count=n_SARIN_BC_RW)
#-- Bind all the bits of the l1b_mds together into a single dictionary
CS_l1b_mds = {}
CS_l1b_mds['Location'] = Location
CS_l1b_mds['Data'] = Data_20Hz
CS_l1b_mds['Geometry'] = Geometry
CS_l1b_mds['Waveform_1Hz'] = Waveform_1Hz
CS_l1b_mds['Waveform_20Hz'] = Waveform_20Hz
#-- return the output dictionary
return CS_l1b_mds
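#-- Hedged usage sketch (not part of the original module): the reader above returns a
#-- nested dictionary keyed by group and variable name. The function name and call
#-- signature shown here are assumptions, inferred by analogy with the Baseline D reader below.
#-- CS_l1b_mds = cryosat_baseline_C(full_filename, MODE)
#-- lat_20hz = CS_l1b_mds['Location']['Lat']
#-- power_20hz = CS_l1b_mds['Waveform_20Hz']['Waveform']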
#-- PURPOSE: Initiate L1b MDS variables for CryoSat Baseline D (netCDF4)
def cryosat_baseline_D(full_filename, MODE, UNPACK=False):
#-- open netCDF4 file for reading
fid = netCDF4.Dataset(os.path.expanduser(full_filename),'r')
#-- use original unscaled units unless UNPACK=True
fid.set_auto_scale(UNPACK)
#-- get dimensions
ind_first_meas_20hz_01 = fid.variables['ind_first_meas_20hz_01'][:].copy()
ind_meas_1hz_20_ku = fid.variables['ind_meas_1hz_20_ku'][:].copy()
n_records = len(ind_first_meas_20hz_01)
n_SARIN_D_RW = 1024
n_SARIN_RW = 512
n_SAR_D_RW = 256
n_SAR_RW = 128
n_LRM_RW = 128
n_blocks = 20
#-- CryoSat-2 Time and Orbit Group
Location = {}
#-- MDS Time
Location['Time'] = np.ma.zeros((n_records,n_blocks))
Location['Time'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
time_20_ku = fid.variables['time_20_ku'][:].copy()
#-- Time: day part
Location['Day'] = np.ma.zeros((n_records,n_blocks))
Location['Day'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
#-- Time: second part
Location['Second'] = np.ma.zeros((n_records,n_blocks))
Location['Second'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
#-- Time: microsecond part
Location['Micsec'] = np.ma.zeros((n_records,n_blocks))
Location['Micsec'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
#-- USO correction factor
Location['USO_Corr'] = np.ma.zeros((n_records,n_blocks))
Location['USO_Corr'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
uso_cor_20_ku = fid.variables['uso_cor_20_ku'][:].copy()
#-- Mode ID
Location['Mode_ID'] = np.ma.zeros((n_records,n_blocks))
Location['Mode_ID'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
flag_instr_mode_op_20_ku =fid.variables['flag_instr_mode_op_20_ku'][:].copy()
#-- Mode Flags
Location['Mode_flags'] = np.ma.zeros((n_records,n_blocks))
Location['Mode_flags'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
flag_instr_mode_flags_20_ku =fid.variables['flag_instr_mode_flags_20_ku'][:].copy()
#-- Platform attitude control mode
Location['Att_control'] = np.ma.zeros((n_records,n_blocks))
Location['Att_control'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
flag_instr_mode_att_ctrl_20_ku =fid.variables['flag_instr_mode_att_ctrl_20_ku'][:].copy()
#-- Instrument configuration
Location['Inst_config'] = np.ma.zeros((n_records,n_blocks))
Location['Inst_config'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
flag_instr_conf_rx_flags_20_ku = fid.variables['flag_instr_conf_rx_flags_20_ku'][:].copy()
#-- acquisition band
Location['Inst_band'] = np.ma.zeros((n_records,n_blocks))
Location['Inst_band'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
flag_instr_conf_rx_bwdt_20_ku = fid.variables['flag_instr_conf_rx_bwdt_20_ku'][:].copy()
#-- instrument channel
Location['Inst_channel'] = np.ma.zeros((n_records,n_blocks))
Location['Inst_channel'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
flag_instr_conf_rx_in_use_20_ku = fid.variables['flag_instr_conf_rx_in_use_20_ku'][:].copy()
#-- tracking mode
Location['Tracking_mode'] = np.ma.zeros((n_records,n_blocks))
Location['Tracking_mode'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
flag_instr_conf_rx_trk_mode_20_ku = fid.variables['flag_instr_conf_rx_trk_mode_20_ku'][:].copy()
#-- Source sequence counter
Location['SSC'] = np.ma.zeros((n_records,n_blocks))
Location['SSC'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
seq_count_20_ku = fid.variables['seq_count_20_ku'][:].copy()
#-- Record Counter
Location['Rec_Count'] = np.ma.zeros((n_records,n_blocks))
Location['Rec_Count'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
rec_count_20_ku = fid.variables['rec_count_20_ku'][:].copy()
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Location['Lat'] = np.ma.zeros((n_records,n_blocks))
Location['Lat'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
lat_20_ku = fid.variables['lat_20_ku'][:].copy()
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Location['Lon'] = np.ma.zeros((n_records,n_blocks))
Location['Lon'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
lon_20_ku = fid.variables['lon_20_ku'][:].copy()
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Location['Alt'] = np.ma.zeros((n_records,n_blocks))
Location['Alt'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
alt_20_ku = fid.variables['alt_20_ku'][:].copy()
#-- Instantaneous altitude rate derived from orbit: packed units (mm/s, 1e-3 m/s)
Location['Alt_rate'] = np.ma.zeros((n_records,n_blocks))
Location['Alt_rate'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
orb_alt_rate_20_ku = fid.variables['orb_alt_rate_20_ku'][:].copy()
#-- Satellite velocity vector. In ITRF: packed units (mm/s, 1e-3 m/s)
#-- ITRF= International Terrestrial Reference Frame
Location['Sat_velocity'] = np.ma.zeros((n_records,n_blocks,3))
Location['Sat_velocity'].mask = np.zeros((n_records,n_blocks,3),dtype=np.bool)
sat_vel_vec_20_ku = fid.variables['sat_vel_vec_20_ku'][:].copy()
#-- Real beam direction vector. In CRF: packed units (micro-m/s, 1e-6 m/s)
#-- CRF= CryoSat Reference Frame.
Location['Real_beam'] = np.ma.zeros((n_records,n_blocks,3))
Location['Real_beam'].mask = np.zeros((n_records,n_blocks,3),dtype=np.bool)
beam_dir_vec_20_ku = fid.variables['beam_dir_vec_20_ku'][:].copy()
#-- Interferometric baseline vector. In CRF: packed units (micro-m/s, 1e-6 m/s)
Location['Baseline'] = np.ma.zeros((n_records,n_blocks,3))
Location['Baseline'].mask = np.zeros((n_records,n_blocks,3),dtype=np.bool)
inter_base_vec_20_ku = fid.variables['inter_base_vec_20_ku'][:].copy()
#-- Star Tracker ID
Location['ST_ID'] = np.ma.zeros((n_records,n_blocks))
Location['ST_ID'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
flag_instr_conf_rx_str_in_use_20_ku = fid.variables['flag_instr_conf_rx_str_in_use_20_ku'][:].copy()
#-- Antenna Bench Roll Angle (Derived from star trackers)
#-- packed units (0.1 micro-degree, 1e-7 degrees)
Location['Roll'] = np.ma.zeros((n_records,n_blocks))
Location['Roll'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
off_nadir_roll_angle_str_20_ku = fid.variables['off_nadir_roll_angle_str_20_ku'][:].copy()
#-- Antenna Bench Pitch Angle (Derived from star trackers)
#-- packed units (0.1 micro-degree, 1e-7 degrees)
Location['Pitch'] = np.ma.zeros((n_records,n_blocks))
Location['Pitch'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
off_nadir_pitch_angle_str_20_ku = fid.variables['off_nadir_pitch_angle_str_20_ku'][:].copy()
#-- Antenna Bench Yaw Angle (Derived from star trackers)
#-- packed units (0.1 micro-degree, 1e-7 degrees)
Location['Yaw'] = np.ma.zeros((n_records,n_blocks))
Location['Yaw'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
off_nadir_yaw_angle_str_20_ku = fid.variables['off_nadir_yaw_angle_str_20_ku'][:].copy()
#-- Measurement Confidence Data Flags
#-- Generally the MCD flags indicate problems when set
#-- If MCD is 0 then no problems or non-nominal conditions were detected
#-- Serious errors are indicated by setting bit 31
Location['MCD'] = np.ma.zeros((n_records,n_blocks))
Location['MCD'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
flag_mcd_20_ku = fid.variables['flag_mcd_20_ku'][:].copy()
#-- CryoSat-2 Measurement Group
#-- Derived from instrument measurement parameters
Data_20Hz = {}
#-- Window Delay reference (two-way) corrected for instrument delays
Data_20Hz['TD'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['TD'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
window_del_20_ku = fid.variables['window_del_20_ku'][:].copy()
#-- H0 Initial Height Word from telemetry
Data_20Hz['H_0'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['H_0'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
h0_applied_20_ku = fid.variables['h0_applied_20_ku'][:].copy()
#-- COR2 Height Rate: on-board tracker height rate over the radar cycle
Data_20Hz['COR2'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['COR2'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
cor2_applied_20_ku = fid.variables['cor2_applied_20_ku'][:].copy()
#-- Coarse Range Word (LAI) derived from telemetry
Data_20Hz['LAI'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['LAI'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
h0_lai_word_20_ku = fid.variables['h0_lai_word_20_ku'][:].copy()
#-- Fine Range Word (FAI) derived from telemetry
Data_20Hz['FAI'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['FAI'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
h0_fai_word_20_ku = fid.variables['h0_fai_word_20_ku'][:].copy()
#-- Automatic Gain Control Channel 1: AGC gain applied on Rx channel 1.
#-- Gain calibration corrections are applied (Sum of AGC stages 1 and 2
#-- plus the corresponding corrections) (dB/100)
Data_20Hz['AGC_CH1'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['AGC_CH1'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
agc_ch1_20_ku = fid.variables['agc_ch1_20_ku'][:].copy()
#-- Automatic Gain Control Channel 2: AGC gain applied on Rx channel 2.
#-- Gain calibration corrections are applied (dB/100)
Data_20Hz['AGC_CH2'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['AGC_CH2'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
agc_ch2_20_ku = fid.variables['agc_ch2_20_ku'][:].copy()
#-- Total Fixed Gain On Channel 1: gain applied by the RF unit. (dB/100)
Data_20Hz['TR_gain_CH1'] = np.ma.zeros((n_records,n_blocks))
import numpy as np
import gym
from meta_mb.logger import logger
from meta_mb.meta_envs.base import MetaEnv
from gym.envs.mujoco.mujoco_env import MujocoEnv
class SwimmerRandVelEnv(MetaEnv, MujocoEnv, gym.utils.EzPickle):
def __init__(self):
self.set_task(self.sample_tasks(1)[0])
MujocoEnv.__init__(self, 'swimmer.xml', 4)
gym.utils.EzPickle.__init__(self)
def sample_tasks(self, n_tasks):
# for fwd/bwd env, goal direc is backwards if - 1.0, forwards if + 1.0
return np.random.uniform(0.1, 0.2, (n_tasks, ))
def set_task(self, task):
"""
Args:
task: task of the meta-learning environment
"""
self.goal_vel = task
def get_task(self):
"""
Returns:
task: task of the meta-learning environment
"""
return self.goal_vel
def step(self, a):
ctrl_cost_coeff = 0.0001
xposbefore = self.sim.data.qpos[0]
self.do_simulation(a, self.frame_skip)
xposafter = self.sim.data.qpos[0]
reward_fwd = np.abs((xposafter - xposbefore) / self.dt - self.goal_vel)
reward_ctrl = - ctrl_cost_coeff * np.square(a).sum()
reward = reward_fwd + reward_ctrl
ob = self._get_obs()
return ob, reward, False, dict(reward_fwd=reward_fwd, reward_ctrl=reward_ctrl)
def _get_obs(self):
qpos = self.sim.data.qpos
qvel = self.sim.data.qvel
return np.concatenate([qpos.flat[2:], qvel.flat])
def reset_model(self):
self.set_state(
self.init_qpos + self.np_random.uniform(low=-.1, high=.1, size=self.model.nq),
self.init_qvel + self.np_random.uniform(low=-.1, high=.1, size=self.model.nv)
)
return self._get_obs()
def log_diagnostics(self, paths, prefix=''):
progs = [
path["observations"][-1][-3] - path["observations"][0][-3]
for path in paths
]
logger.record_tabular(prefix+'AverageForwardProgress', np.mean(progs))
logger.record_tabular(prefix+'MaxForwardProgress', np.max(progs))
logger.record_tabular(prefix+'MinForwardProgress', np.min(progs))
logger.record_tabular(prefix+'StdForwardProgress', np.std(progs))
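# Hedged usage sketch (not part of the original file): how the task interface above is
# typically exercised. The rollout length and random actions are illustrative assumptions
# and require a working MuJoCo installation.
if __name__ == "__main__":
    env = SwimmerRandVelEnv()
    task = env.sample_tasks(1)[0]  # target velocity drawn from [0.1, 0.2]
    env.set_task(task)
    obs = env.reset()
    for _ in range(10):
        obs, reward, done, info = env.step(env.action_space.sample())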
"""
Implementation of various calibration methods from https://github.com/JonathanWenger/pycalib.
"""
import warnings
import matplotlib.pyplot as plt
import numpy as np
import scipy.cluster.vq
import scipy.optimize
import scipy.special
import scipy.stats
import sklearn
import sklearn.isotonic
import sklearn.linear_model
import sklearn.multiclass
import sklearn.utils
from sklearn.base import clone
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils._joblib import Parallel
from sklearn.utils._joblib import delayed
from sklearn.utils.validation import check_is_fitted
# Ignore binned_statistic FutureWarning
# warnings.simplefilter(action='ignore', category=FutureWarning)
class CalibrationMethod(sklearn.base.BaseEstimator):
"""
A generic class for probability calibration
A calibration method takes a set of posterior class probabilities and transforms them into calibrated posterior
probabilities. Calibrated in this sense means that the empirical frequency of a correct class prediction matches its
predicted posterior probability.
"""
def __init__(self):
super().__init__()
def fit(self, X, y):
"""
Fit the calibration method based on the given uncalibrated class probabilities X and ground truth labels y.
Parameters
----------
X : array-like, shape (n_samples, n_classes)
Training data, i.e. predicted probabilities of the base classifier on the calibration set.
y : array-like, shape (n_samples,)
Target classes.
Returns
-------
self : object
Returns an instance of self.
"""
raise NotImplementedError("Subclass must implement this method.")
def predict_proba(self, X):
"""
Compute calibrated posterior probabilities for a given array of posterior probabilities from an arbitrary
classifier.
Parameters
----------
X : array-like, shape (n_samples, n_classes)
The uncalibrated posterior probabilities.
Returns
-------
P : array, shape (n_samples, n_classes)
The predicted probabilities.
"""
raise NotImplementedError("Subclass must implement this method.")
def predict(self, X):
"""
Predict the class of new samples after scaling. Predictions are identical to the ones from the uncalibrated
classifier.
Parameters
----------
X : array-like, shape (n_samples, n_classes)
The uncalibrated posterior probabilities.
Returns
-------
C : array, shape (n_samples,)
The predicted classes.
"""
return np.argmax(self.predict_proba(X), axis=1)
def plot(self, filename, xlim=[0, 1], **kwargs):
"""
Plot the calibration map.
Parameters
----------
xlim : array-like
Range of inputs of the calibration map to be plotted.
**kwargs :
Additional arguments passed on to :func:`matplotlib.plot`.
"""
# TODO: Fix this plotting function
# Generate data and transform
x = np.linspace(0, 1, 10000)
y = self.predict_proba(np.column_stack([1 - x, x]))[:, 1]
# Plot and label
plt.plot(x, y, **kwargs)
plt.xlim(xlim)
plt.xlabel("p(y=1|x)")
plt.ylabel("f(p(y=1|x))")
class NoCalibration(CalibrationMethod):
"""
A class that performs no calibration.
This class can be used as a baseline for benchmarking.
Parameters
----------
logits : bool, default=False
Are the inputs for calibration logits (e.g. from a neural network)?
"""
def __init__(self, logits=False):
self.logits = logits
def fit(self, X, y):
return self
def predict_proba(self, X):
if self.logits:
return scipy.special.softmax(X, axis=1)
else:
return X
class TemperatureScaling(CalibrationMethod):
"""
Probability calibration using temperature scaling
Temperature scaling [1]_ is a one parameter multi-class scaling method. Output confidence scores are calibrated,
meaning they match empirical frequencies of the associated class prediction. Temperature scaling does not change the
class predictions of the underlying model.
Parameters
----------
T_init : float
Initial temperature parameter used for scaling. This parameter is optimized in order to calibrate output
probabilities.
verbose : bool
Print information on optimization procedure.
References
----------
.. [1] On calibration of modern neural networks, C. Guo, G. Pleiss, Y. Sun, K. Q. Weinberger, ICML 2017
"""
def __init__(self, T_init=1.0, verbose=False):
super().__init__()
if T_init <= 0:
raise ValueError("Temperature not greater than 0.")
self.T_init = T_init
self.verbose = verbose
def fit(self, X, y):
"""
Fit the calibration method based on the given uncalibrated class probabilities or logits X and ground truth
labels y.
Parameters
----------
X : array-like, shape (n_samples, n_classes)
Training data, i.e. predicted probabilities or logits of the base classifier on the calibration set.
y : array-like, shape (n_samples,)
Target classes.
Returns
-------
self : object
Returns an instance of self.
"""
# Define objective function (NLL / cross entropy)
def objective(T):
# Calibrate with given T
P = scipy.special.softmax(X / T, axis=1)
# Compute negative log-likelihood
P_y = P[np.array(np.arange(0, X.shape[0])), y]
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
NLL = - np.sum(np.log(P_y + tiny))
return NLL
# Derivative of the objective with respect to the temperature T
def gradient(T):
# Exponential terms
E = np.exp(X / T)
# Gradient
dT_i = (np.sum(E * (X - X[np.array(np.arange(0, X.shape[0])), y].reshape(-1, 1)), axis=1)) \
/ np.sum(E, axis=1)
grad = - dT_i.sum() / T ** 2
return grad
# Optimize
self.T = scipy.optimize.fmin_bfgs(f=objective, x0=self.T_init,
fprime=gradient, gtol=1e-06, disp=self.verbose)[0]
# Check for T > 0
if self.T <= 0:
raise ValueError("Temperature not greater than 0.")
return self
def predict_proba(self, X):
"""
Compute calibrated posterior probabilities for a given array of posterior probabilities from an arbitrary
classifier.
Parameters
----------
X : array-like, shape (n_samples, n_classes)
The uncalibrated posterior probabilities.
Returns
-------
P : array, shape (n_samples, n_classes)
The predicted probabilities.
"""
# Check is fitted
check_is_fitted(self, "T")
# Transform with scaled softmax
return scipy.special.softmax(X / self.T, axis=1)
def latent(self, z):
"""
Evaluate the latent function Tz of temperature scaling.
Parameters
----------
z : array-like, shape=(n_evaluations,)
Input confidence for which to evaluate the latent function.
Returns
-------
f : array-like, shape=(n_evaluations,)
Values of the latent function at z.
"""
check_is_fitted(self, "T")
return self.T * z
def plot_latent(self, z, filename, **kwargs):
"""
Plot the latent function of the calibration method.
Parameters
----------
z : array-like, shape=(n_evaluations,)
Input confidence to plot latent function for.
filename :
Filename / -path where to save output.
kwargs
Additional arguments passed on to matplotlib.pyplot.subplots.
Returns
-------
"""
pass
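def _example_temperature_scaling():
    """
    Hedged usage sketch (not part of the original API): fit temperature scaling on
    synthetic, overconfident logits and return calibrated probabilities. The data
    below is an assumption purely for illustration.
    """
    rng = np.random.RandomState(0)
    y = rng.randint(0, 3, size=500)                         # ground-truth labels
    logits = np.eye(3)[y] * 3.0 + rng.normal(size=(500, 3)) # label-correlated, overconfident logits
    ts = TemperatureScaling().fit(logits, y)                # optimizes T by minimizing the NLL
    return ts.predict_proba(logits)                         # softmax(logits / T)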
class PlattScaling(CalibrationMethod):
"""
Probability calibration using Platt scaling
Platt scaling [1]_ [2]_ is a parametric method designed to output calibrated posterior probabilities for (non-probabilistic)
binary classifiers. It was originally introduced in the context of SVMs. It works by fitting a logistic
regression model to the model output using the negative log-likelihood as a loss function.
Parameters
----------
regularization : float, default=10^(-12)
Regularization constant, determining degree of regularization in logistic regression.
random_state : int, RandomState instance or None, optional (default=None)
The seed of the pseudo random number generator to use when shuffling the data.
If `int`, `random_state` is the seed used by the random number generator;
If `RandomState` instance, `random_state` is the random number generator;
If `None`, the random number generator is the RandomState instance used
by `np.random`.
References
----------
.. [1] Platt, J. Probabilistic Outputs for Support Vector Machines and Comparisons to Regularized Likelihood
Methods in Advances in Large-Margin Classifiers (MIT Press, 1999)
.. [2] Lin, H.-T., Lin, C.-J. & Weng, R. C. A note on Platt's probabilistic outputs for support vector machines.
Machine Learning 68, 267–276 (2007)
"""
def __init__(self, regularization=10 ** -12, random_state=None):
super().__init__()
self.regularization = regularization
self.random_state = sklearn.utils.check_random_state(random_state)
def fit(self, X, y, n_jobs=None):
"""
Fit the calibration method based on the given uncalibrated class probabilities X and ground truth labels y.
Parameters
----------
X : array-like, shape (n_samples, n_classes)
Training data, i.e. predicted probabilities of the base classifier on the calibration set.
y : array-like, shape (n_samples,)
Target classes.
n_jobs : int or None, optional (default=None)
The number of jobs to use for the computation.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details.
Returns
-------
self : object
Returns an instance of self.
"""
if X.ndim == 1:
raise ValueError("Calibration training data must have shape (n_samples, n_classes).")
elif np.shape(X)[1] == 2:
self.logistic_regressor_ = sklearn.linear_model.LogisticRegression(C=1 / self.regularization,
solver='lbfgs',
random_state=self.random_state)
self.logistic_regressor_.fit(X[:, 1].reshape(-1, 1), y)
elif np.shape(X)[1] > 2:
self.onevsrest_calibrator_ = OneVsRestCalibrator(calibrator=clone(self), n_jobs=n_jobs)
self.onevsrest_calibrator_.fit(X, y)
return self
def predict_proba(self, X):
"""
Compute calibrated posterior probabilities for a given array of posterior probabilities from an arbitrary
classifier.
Parameters
----------
X : array-like, shape (n_samples, n_classes)
The uncalibrated posterior probabilities.
Returns
-------
P : array, shape (n_samples, n_classes)
The predicted probabilities.
"""
if X.ndim == 1:
raise ValueError("Calibration data must have shape (n_samples, n_classes).")
elif np.shape(X)[1] == 2:
check_is_fitted(self, "logistic_regressor_")
return self.logistic_regressor_.predict_proba(X[:, 1].reshape(-1, 1))
elif np.shape(X)[1] > 2:
check_is_fitted(self, "onevsrest_calibrator_")
return self.onevsrest_calibrator_.predict_proba(X)
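def _example_platt_scaling():
    """
    Hedged usage sketch (not part of the original API): calibrate binary scores with
    Platt scaling. The synthetic scores below are an assumption for illustration only.
    """
    rng = np.random.RandomState(0)
    y = rng.randint(0, 2, size=500)
    scores = np.clip(0.3 * y + rng.uniform(size=500) * 0.7, 0.0, 1.0)  # crude, miscalibrated scores
    X = np.column_stack([1.0 - scores, scores])                        # shape (n_samples, 2)
    ps = PlattScaling(random_state=0).fit(X, y)
    return ps.predict_proba(X)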
class IsotonicRegression(CalibrationMethod):
"""
Probability calibration using Isotonic Regression
Isotonic regression [1]_ [2]_ is a non-parametric approach to mapping (non-probabilistic) classifier scores to
probabilities. It assumes an isotonic (non-decreasing) relationship between classifier scores and probabilities.
Parameters
----------
out_of_bounds : string, optional, default: "clip"
The ``out_of_bounds`` parameter handles how x-values outside of the
training domain are handled. When set to "nan", predicted y-values
will be NaN. When set to "clip", predicted y-values will be
set to the value corresponding to the nearest train interval endpoint.
When set to "raise", allow ``interp1d`` to throw ValueError.
References
----------
.. [1] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [2] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
def __init__(self, out_of_bounds="clip"):
super().__init__()
self.out_of_bounds = out_of_bounds
def fit(self, X, y, n_jobs=None):
"""
Fit the calibration method based on the given uncalibrated class probabilities X and ground truth labels y.
Parameters
----------
X : array-like, shape (n_samples, n_classes)
Training data, i.e. predicted probabilities of the base classifier on the calibration set.
y : array-like, shape (n_samples,)
Target classes.
n_jobs : int or None, optional (default=None)
The number of jobs to use for the computation.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details.
Returns
-------
self : object
Returns an instance of self.
"""
if X.ndim == 1:
raise ValueError("Calibration training data must have shape (n_samples, n_classes).")
elif np.shape(X)[1] == 2:
self.isotonic_regressor_ = sklearn.isotonic.IsotonicRegression(increasing=True,
out_of_bounds=self.out_of_bounds)
self.isotonic_regressor_.fit(X[:, 1], y)
elif np.shape(X)[1] > 2:
self.onevsrest_calibrator_ = OneVsRestCalibrator(calibrator=clone(self), n_jobs=n_jobs)
self.onevsrest_calibrator_.fit(X, y)
return self
def predict_proba(self, X):
"""
Compute calibrated posterior probabilities for a given array of posterior probabilities from an arbitrary
classifier.
Parameters
----------
X : array-like, shape (n_samples, n_classes)
The uncalibrated posterior probabilities.
Returns
-------
P : array, shape (n_samples, n_classes)
The predicted probabilities.
"""
if X.ndim == 1:
raise ValueError("Calibration data must have shape (n_samples, n_classes).")
elif np.shape(X)[1] == 2:
check_is_fitted(self, "isotonic_regressor_")
p1 = self.isotonic_regressor_.predict(X[:, 1])
return np.column_stack([1 - p1, p1])
elif np.shape(X)[1] > 2:
check_is_fitted(self, "onevsrest_calibrator_")
return self.onevsrest_calibrator_.predict_proba(X)
class HistogramBinning(CalibrationMethod):
"""
Probability calibration using histogram binning
Histogram binning [1]_ is a nonparametric approach to probability calibration. Classifier scores are binned into a
given number of bins either based on fixed width or frequency. Classifier scores are then computed based on the
empirical frequency of class 1 in each bin.
Parameters
----------
mode : str, default='equal_width'
Binning mode used. One of ['equal_width', 'equal_freq'].
n_bins : int, default=20
Number of bins to bin classifier scores into.
input_range : list, shape (2,), default=[0, 1]
Range of the classifier scores.
References
----------
.. [1] Zadrozny, B. & Elkan, C. Obtaining calibrated probability estimates from decision trees and naive Bayesian
classifiers in Proceedings of the 18th International Conference on Machine Learning (ICML, 2001), 609–616.
"""
def __init__(self, mode='equal_width', n_bins=20, input_range=[0, 1]):
super().__init__()
if mode in ['equal_width', 'equal_freq']:
self.mode = mode
else:
raise ValueError("Mode not recognized. Choose on of 'equal_width', or 'equal_freq'.")
self.n_bins = n_bins
self.input_range = input_range
def fit(self, X, y, n_jobs=None):
"""
Fit the calibration method based on the given uncalibrated class probabilities X and ground truth labels y.
Parameters
----------
X : array-like, shape (n_samples, n_classes)
Training data, i.e. predicted probabilities of the base classifier on the calibration set.
y : array-like, shape (n_samples,)
Target classes.
n_jobs : int or None, optional (default=None)
The number of jobs to use for the computation.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details.
Returns
-------
self : object
Returns an instance of self.
"""
if X.ndim == 1:
raise ValueError("Calibration training data must have shape (n_samples, n_classes).")
elif np.shape(X)[1] == 2:
return self._fit_binary(X, y)
elif np.shape(X)[1] > 2:
self.onevsrest_calibrator_ = OneVsRestCalibrator(calibrator=clone(self), n_jobs=n_jobs)
self.onevsrest_calibrator_.fit(X, y)
return self
def _fit_binary(self, X, y):
if self.mode == 'equal_width':
# Compute probability of class 1 in each equal width bin
binned_stat = scipy.stats.binned_statistic(x=X[:, 1], values=np.equal(1, y), statistic='mean',
bins=self.n_bins, range=self.input_range)
self.prob_class_1 = binned_stat.statistic # TODO: test this and correct attributes
self.binning = binned_stat.bin_edges
elif self.mode == 'equal_freq':
# Find binning based on equal frequency
self.binning = np.quantile(X[:, 1],
q=np.linspace(self.input_range[0], self.input_range[1], self.n_bins + 1))
# Compute probability of class 1 in equal frequency bins
digitized = np.digitize(X[:, 1], bins=self.binning)
digitized[digitized == len(self.binning)] = len(self.binning) - 1 # include rightmost edge in partition
self.prob_class_1 = [y[digitized == i].mean() for i in range(1, len(self.binning))]
return self
def predict_proba(self, X):
"""
Compute calibrated posterior probabilities for a given array of posterior probabilities from an arbitrary
classifier.
Parameters
----------
X : array-like, shape (n_samples, n_classes)
The uncalibrated posterior probabilities.
Returns
-------
P : array, shape (n_samples, n_classes)
The predicted probabilities.
"""
if X.ndim == 1:
raise ValueError("Calibration data must have shape (n_samples, n_classes).")
elif np.shape(X)[1] == 2:
check_is_fitted(self, ["binning", "prob_class_1"])
# Find bin of predictions
digitized = np.digitize(X[:, 1], bins=self.binning)
digitized[digitized == len(self.binning)] = len(self.binning) - 1 # include rightmost edge in partition
# Transform to empirical frequency of class 1 in each bin
p1 = np.array([self.prob_class_1[j] for j in (digitized - 1)])
# If empirical frequency is NaN, do not change prediction
p1 = np.where(np.isfinite(p1), p1, X[:, 1])
assert np.all(np.isfinite(p1)), "Predictions are not all finite."
return np.column_stack([1 - p1, p1])
elif np.shape(X)[1] > 2:
check_is_fitted(self, "onevsrest_calibrator_")
return self.onevsrest_calibrator_.predict_proba(X)
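def _example_histogram_binning():
    """
    Hedged usage sketch (not part of the original API): compare the two binning modes on
    the same synthetic binary problem. Bin count and data are assumptions for illustration.
    """
    rng = np.random.RandomState(0)
    scores = rng.uniform(size=1000)
    y = rng.binomial(1, scores)                   # labels drawn consistently with the scores
    X = np.column_stack([1.0 - scores, scores])
    hb_width = HistogramBinning(mode='equal_width', n_bins=15).fit(X, y)
    hb_freq = HistogramBinning(mode='equal_freq', n_bins=15).fit(X, y)
    return hb_width.predict_proba(X), hb_freq.predict_proba(X)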
class BayesianBinningQuantiles(CalibrationMethod):
"""
Probability calibration using Bayesian binning into quantiles
Bayesian binning into quantiles [1]_ considers multiple equal frequency binning models and combines them through
Bayesian model averaging. Each binning model :math:`M` is scored according to
:math:`\\text{Score}(M) = P(M) \\cdot P(D | M),` where a uniform prior :math:`P(M)` is assumed. The marginal likelihood
:math:`P(D | M)` has a closed form solution under the assumption of independent binomial class distributions in each
bin with beta priors.
Parameters
----------
C : int, default = 10
Constant controlling the number of binning models.
input_range : list, shape (2,), default=[0, 1]
Range of the scores to calibrate.
References
----------
.. [1] Naeini, M. P., Cooper, G. F. & Hauskrecht, M. Obtaining Well Calibrated Probabilities Using Bayesian Binning
in Proceedings of the Twenty-Ninth AAAI Conference on Artificial Intelligence, Austin, Texas, USA.
"""
def __init__(self, C=10, input_range=[0, 1]):
super().__init__()
self.C = C
self.input_range = input_range
def _binning_model_logscore(self, probs, y, partition, N_prime=2):
"""
Compute the log score of a binning model
Each binning model :math:`M` is scored according to :math:`Score(M) = P(M) \\cdot P(D | M),` where a uniform prior
:math:`P(M)` is assumed and the marginal likelihood :math:`P(D | M)` has a closed form solution
under the assumption of a binomial class distribution in each bin with beta priors.
Parameters
----------
probs : array-like, shape (n_samples, )
Predicted posterior probabilities.
y : array-like, shape (n_samples, )
Target classes.
partition : array-like, shape (n_bins + 1, )
Interval partition defining a binning.
N_prime : int, default=2
Equivalent sample size expressing the strength of the belief in the prior distribution.
Returns
-------
log_score : float
Log of Bayesian score for a given binning model
"""
# Setup
B = len(partition) - 1
p = (partition[1:] - partition[:-1]) / 2 + partition[:-1]
# Compute positive and negative samples in given bins
N = np.histogram(probs, bins=partition)[0]
digitized = np.digitize(probs, bins=partition)
digitized[digitized == len(partition)] = len(partition) - 1 # include rightmost edge in partition
m = [y[digitized == i].sum() for i in range(1, len(partition))]
n = N - m
# Compute the parameters of the Beta priors
tiny = np.finfo(np.float).tiny # Avoid scipy.special.gammaln(0), which can arise if bin has zero width
alpha = N_prime / B * p
alpha[alpha == 0] = tiny
beta = N_prime / B * (1 - p)
beta[beta == 0] = tiny
# Prior for a given binning model (uniform)
log_prior = - np.log(self.T)
# Compute the marginal log-likelihood for the given binning model
log_likelihood = np.sum(
scipy.special.gammaln(N_prime / B) + scipy.special.gammaln(m + alpha) + scipy.special.gammaln(n + beta) - (
scipy.special.gammaln(N + N_prime / B) + scipy.special.gammaln(alpha) + scipy.special.gammaln(
beta)))
# Compute score for the given binning model
log_score = log_prior + log_likelihood
return log_score
def fit(self, X, y, n_jobs=None):
"""
Fit the calibration method based on the given uncalibrated class probabilities X and ground truth labels y.
Parameters
----------
X : array-like, shape (n_samples, n_classes)
Training data, i.e. predicted probabilities of the base classifier on the calibration set.
y : array-like, shape (n_samples,)
Target classes.
n_jobs : int or None, optional (default=None)
The number of jobs to use for the computation.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details.
Returns
-------
self : object
Returns an instance of self.
"""
if X.ndim == 1:
raise ValueError("Calibration training data must have shape (n_samples, n_classes).")
elif np.shape(X)[1] == 2:
self.binnings = []
self.log_scores = []
self.prob_class_1 = []
self.T = 0
return self._fit_binary(X, y)
elif np.shape(X)[1] > 2:
self.onevsrest_calibrator_ = OneVsRestCalibrator(calibrator=clone(self), n_jobs=n_jobs)
self.onevsrest_calibrator_.fit(X, y)
return self
def _fit_binary(self, X, y):
# Determine number of bins
N = len(y)
min_bins = int(max(1, np.floor(N ** (1 / 3) / self.C)))
max_bins = int(min(np.ceil(N / 5), np.ceil(self.C * N ** (1 / 3))))
self.T = max_bins - min_bins + 1
# Define (equal frequency) binning models and compute scores
self.binnings = []
self.log_scores = []
self.prob_class_1 = []
for i, n_bins in enumerate(range(min_bins, max_bins + 1)):
# Compute binning from data and set outer edges to range
binning_tmp = np.quantile(X[:, 1], q=np.linspace(self.input_range[0], self.input_range[1], n_bins + 1))
binning_tmp[0] = self.input_range[0]
binning_tmp[-1] = self.input_range[1]
# Enforce monotonicity of binning (np.quantile does not guarantee monotonicity)
self.binnings.append(np.maximum.accumulate(binning_tmp))
# Compute score
self.log_scores.append(self._binning_model_logscore(probs=X[:, 1], y=y, partition=self.binnings[i]))
# Compute empirical accuracy for all bins
digitized = np.digitize(X[:, 1], bins=self.binnings[i])
# include rightmost edge in partition
digitized[digitized == len(self.binnings[i])] = len(self.binnings[i]) - 1
def empty_safe_bin_mean(a, empty_value):
"""
Assign the bin mean to an empty bin. Corresponds to prior assumption of the underlying classifier
being calibrated.
"""
if a.size == 0:
return empty_value
else:
return a.mean()
self.prob_class_1.append(
[empty_safe_bin_mean(y[digitized == k], empty_value=(self.binnings[i][k] + self.binnings[i][k - 1]) / 2)
for k in range(1, len(self.binnings[i]))])
return self
def predict_proba(self, X):
"""
Compute calibrated posterior probabilities for a given array of posterior probabilities from an arbitrary
classifier.
Parameters
----------
X : array-like, shape (n_samples, n_classes)
The uncalibrated posterior probabilities.
Returns
-------
P : array, shape (n_samples, n_classes)
The predicted probabilities.
"""
if X.ndim == 1:
raise ValueError("Calibration data must have shape (n_samples, n_classes).")
elif np.shape(X)[1] == 2:
check_is_fitted(self, ["binnings", "log_scores", "prob_class_1", "T"])
# Find bin for all binnings and the associated empirical accuracy
posterior_prob_binnings = np.zeros(shape=[np.shape(X)[0], len(self.binnings)])
for i, binning in enumerate(self.binnings):
bin_ids = np.searchsorted(binning, X[:, 1])
bin_ids = np.clip(bin_ids, a_min=0, a_max=len(binning) - 1) # necessary if X is out of range
posterior_prob_binnings[:, i] = [self.prob_class_1[i][j] for j in (bin_ids - 1)]
# Computed score-weighted average
norm_weights = np.exp(np.array(self.log_scores) - scipy.special.logsumexp(self.log_scores))
posterior_prob = np.sum(posterior_prob_binnings * norm_weights, axis=1)
# Compute probability for other class
return np.column_stack([1 - posterior_prob, posterior_prob])
elif np.shape(X)[1] > 2:
    check_is_fitted(self, "onevsrest_calibrator_")
    return self.onevsrest_calibrator_.predict_proba(X)
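def _example_bayesian_binning():
    """
    Hedged usage sketch (not part of the original API): Bayesian binning into quantiles
    averages several equal-frequency binnings, weighted by their Bayesian scores.
    The synthetic data below is an assumption for illustration only.
    """
    rng = np.random.RandomState(0)
    scores = rng.uniform(size=1000)
    y = rng.binomial(1, scores)
    X = np.column_stack([1.0 - scores, scores])
    bbq = BayesianBinningQuantiles(C=10).fit(X, y)
    return bbq.predict_proba(X)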
import argparse
import os
import ruamel.yaml as yaml
import numpy as np
import random
import time
import datetime
import json
from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from torch.utils.data import DataLoader
from tqdm import tqdm
from models.singlestream_v2.baseline_retrieval import ALBEF
from models.vit import interpolate_pos_embed
from models.tokenization_bert import BertTokenizer
import utils
from dataset import create_dataset, create_sampler, create_loader
from scheduler import create_scheduler
from optim import create_optimizer
@torch.no_grad()
def evaluation(model, data_loader, tokenizer, device, config):
# test
model.eval()
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Evaluation:'
print('Computing features for evaluation...')
start_time = time.time()
texts = data_loader.dataset.text
num_text = len(texts)
text_bs = 256
text_feats = []
text_embeds = []
text_atts = []
text_inputs = []
for i in tqdm(range(0, num_text, text_bs)):
text = texts[i: min(num_text, i+text_bs)]
text_input = tokenizer(text, padding='max_length', truncation=True, max_length=30, return_tensors="pt").to(device)
text_inputs.append(text_input)
text_output = model.text_encoder(text_input.input_ids, attention_mask = text_input.attention_mask, mode='text')
text_feat = text_output.last_hidden_state
text_embed = F.normalize(model.text_proj(text_feat[:,0,:]))
text_embeds.append(text_embed)
text_feats.append(text_feat)
text_atts.append(text_input.attention_mask)
text_embeds = torch.cat(text_embeds,dim=0)
text_feats = torch.cat(text_feats,dim=0)
text_atts = torch.cat(text_atts,dim=0)
text_tokens = torch.cat([_.input_ids for _ in text_inputs], dim=0)
image_feats = []
image_embeds = []
for image, img_id in tqdm(data_loader):
image = image.to(device)
image_feat = model.visual_encoder(image)
image_embed = model.vision_proj(image_feat[:,0,:])
image_embed = F.normalize(image_embed,dim=-1)
image_feats.append(image_feat)
image_embeds.append(image_embed)
image_feats = torch.cat(image_feats,dim=0)
image_embeds = torch.cat(image_embeds,dim=0)
sims_matrix = image_embeds @ text_embeds.t()
# sims_matrix = torch.Tensor(np.load('/net/acadia10a/data/zkhan/albef-sims/albef-epoch30-sims.npy')).to(device)
score_matrix_i2t = torch.full((len(data_loader.dataset.image),len(texts)),-100.0).to(device)
num_tasks = utils.get_world_size()
rank = utils.get_rank()
step = sims_matrix.size(0)//num_tasks + 1
start = rank*step
end = min(sims_matrix.size(0),start+step)
for i,sims in enumerate(metric_logger.log_every(sims_matrix[start:end], 50, header)):
topk_sim, topk_idx = sims.topk(k=config['k_test'], dim=0)
encoder_output = image_feats[start+i].repeat(config['k_test'],1,1)
encoder_att = torch.ones(encoder_output.size()[:-1],dtype=torch.long).to(device)
output = model.text_encoder(text_tokens[topk_idx],
attention_mask = text_atts[topk_idx],
encoder_hidden_states = encoder_output,
encoder_attention_mask = encoder_att,
return_dict = True,
mode = 'multimodal'
)
score = model.itm_head(output.last_hidden_state[:,0,:])[:,1]
score_matrix_i2t[start+i,topk_idx] = score
sims_matrix = sims_matrix.t()
score_matrix_t2i = torch.full((len(texts),len(data_loader.dataset.image)),-100.0).to(device)
step = sims_matrix.size(0)//num_tasks + 1
start = rank*step
end = min(sims_matrix.size(0),start+step)
for i,sims in enumerate(metric_logger.log_every(sims_matrix[start:end], 50, header)):
topk_sim, topk_idx = sims.topk(k=config['k_test'], dim=0)
encoder_output = image_feats[topk_idx]
encoder_att = torch.ones(encoder_output.size()[:-1],dtype=torch.long).to(device)
output = model.text_encoder(text_tokens[start+i].repeat(config['k_test'],1),
attention_mask = text_atts[start+i].repeat(config['k_test'],1),
encoder_hidden_states = encoder_output,
encoder_attention_mask = encoder_att,
return_dict = True,
mode = 'multimodal'
)
score = model.itm_head(output.last_hidden_state[:,0,:])[:,1]
score_matrix_t2i[start+i,topk_idx] = score
if args.distributed:
dist.barrier()
torch.distributed.all_reduce(score_matrix_i2t, op=torch.distributed.ReduceOp.SUM)
torch.distributed.all_reduce(score_matrix_t2i, op=torch.distributed.ReduceOp.SUM)
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Evaluation time {}'.format(total_time_str))
return score_matrix_i2t.cpu().numpy(), score_matrix_t2i.cpu().numpy()
@torch.no_grad()
def itm_eval(scores_i2t, scores_t2i, txt2img, img2txt):
#Images->Text
ranks = np.zeros(scores_i2t.shape[0])
for index,score in enumerate(scores_i2t):
inds = np.argsort(score)[::-1]
# Score
rank = 1e20
for i in img2txt[index]:
tmp = np.where(inds == i)[0][0]
if tmp < rank:
rank = tmp
ranks[index] = rank
# Compute metrics
tr1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
tr5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
import os
import json
import argparse
import logging
from pathlib import Path
import time
from typing import Tuple
import pandas as pd
import numpy as np
import tqdm
from melloddy_tuner.utils import hash_reference_set
from melloddy_tuner.utils.helper import (
load_config,
load_key,
make_dir,
read_input_file,
create_log_files,
sanity_check_assay_sizes,
sanity_check_assay_type,
sanity_check_uniqueness,
save_df_as_csv,
)
from melloddy_tuner.utils.config import ConfigDict
from multiprocessing import Pool
def init_arg_parser():
"""Argparser module to load commandline arguments.
Returns:
[Namespace]: Arguments from argparser
"""
parser = argparse.ArgumentParser(description="smiles standardization")
parser.add_argument(
"-assay",
"--assay_file",
type=str,
help="path of the assay metadata file T0",
required=True,
)
parser.add_argument(
"-a",
"--activity_file",
type=str,
help="path of the activity data file T1",
required=True,
)
parser.add_argument(
"-mt",
"--mapping_table",
type=str,
help="path of the mapping table T5",
required=True,
)
parser.add_argument(
"-c", "--config_file", type=str, help="path of the config file", required=True
)
parser.add_argument(
"-k", "--key_file", type=str, help="path of the key file", required=True
)
parser.add_argument(
"-o",
"--output_dir",
type=str,
help="path to the generated output directory",
required=True,
)
parser.add_argument(
"-r", "--run_name", type=str, help="name of your current run", required=True
)
parser.add_argument(
"-rh",
"--ref_hash",
type=str,
help="path to the reference hash key file provided by the consortium. (ref_hash.json)",
)
parser.add_argument(
"-ni",
"--non_interactive",
help="Enables an non-interactive mode for cluster/server usage",
action="store_true",
default=False,
)
parser.add_argument(
"-cpu", "--number_cpu", type=int, help="number of CPUs", default=1
)
args = parser.parse_args()
return args
def most_common_qualifier(qualifiers: list) -> str:
"""Determines the most common qualifier, in case of a tie including '=' returns '='
Input:
qualifiers - list of qualifiers, accepted values '<', '>' and '='
Output:
str: the most common qualifier. In case of a tie prefers '='. If a tie is between '<' and '>' - returns None
"""
counts = []
for qual in ["<", ">", "="]:
counts.append((qual, qualifiers.count(qual)))
counts.sort(key=lambda tup: tup[1], reverse=True)
if counts[0][1] > counts[1][1]:
return counts[0][0]
elif counts[0][0] == "=" or counts[1][0] == "=" or counts[2][1] == counts[0][1]:
return "="
else:
return None
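def _example_most_common_qualifier():
    """Hedged illustration (not part of the original script) of the tie-breaking rules above."""
    assert most_common_qualifier(["<", "<", "="]) == "<"        # clear majority wins
    assert most_common_qualifier(["<", "=", ">"]) == "="        # ties involving '=' prefer '='
    assert most_common_qualifier(["<", "<", ">", ">"]) is None  # pure '<' vs '>' tie is unresolved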
def aggr_median(values, qualifiers) -> Tuple:
"""Identifies median of values and the most common qualifier"""
return np.median(values), most_common_qualifier(list(qualifiers))
def aggr_min(values, qualifiers) -> Tuple:
"""Identifies the minimum value and teh corresponding qualifier"""
return values[np.argmin(values)], qualifiers[np.argmin(values)]
def aggr_max(values, qualifiers) -> Tuple:
"""Identifies the maximum values and the corresponding qualifier
If '<' qualifier is present, only those elements ae considered
"""
if (">" in qualifiers) or ("=" in qualifiers):
mask = np.array([i for i in range(len(qualifiers)) if qualifiers[i] != "<"])
ind = mask[np.argmax(np.array(values)[mask])]
return values[ind], qualifiers[ind]
from math import exp
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
# from kornia.color import rgb_to_yuv
from torch.nn.modules.loss import _Loss
import numpy as np
def gaussian(window_size, sigma):
gauss = torch.Tensor([exp(-(x - window_size//2)**2/float(2*sigma**2)) for x in range(window_size)])
return gauss/gauss.sum()
def create_window(window_size, channel=1):
_1D_window = gaussian(window_size, 1.5).unsqueeze(1)
_2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
window = _2D_window.expand(channel, 1, window_size, window_size).contiguous()
return window
def ssim(img1, img2, window_size=11, window=None, size_average=True, full=False, val_range=None):
# Value range can be different from 255. Other common ranges are 1 (sigmoid) and 2 (tanh).
if val_range is None:
if torch.max(img1) > 128:
max_val = 255
else:
max_val = 1
if torch.min(img1) < -0.5:
min_val = -1
else:
min_val = 0
L = max_val - min_val
else:
L = val_range
padd = 0
(_, channel, height, width) = img1.size()
if window is None:
real_size = min(window_size, height, width)
window = create_window(real_size, channel=channel).to(img1.device)
mu1 = F.conv2d(img1, window, padding=padd, groups=channel)
mu2 = F.conv2d(img2, window, padding=padd, groups=channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1 * mu2
sigma1_sq = F.conv2d(img1 * img1, window, padding=padd, groups=channel) - mu1_sq
sigma2_sq = F.conv2d(img2 * img2, window, padding=padd, groups=channel) - mu2_sq
sigma12 = F.conv2d(img1 * img2, window, padding=padd, groups=channel) - mu1_mu2
C1 = (0.01 * L) ** 2
C2 = (0.03 * L) ** 2
v1 = 2.0 * sigma12 + C2
v2 = sigma1_sq + sigma2_sq + C2
cs = torch.mean(v1 / v2) # contrast sensitivity
ssim_map = ((2 * mu1_mu2 + C1) * v1) / ((mu1_sq + mu2_sq + C1) * v2)
if size_average:
ret = ssim_map.mean()
else:
ret = ssim_map.mean(1).mean(1).mean(1)
if full:
return ret, cs
return ret
def msssim(img1, img2, window_size=11, size_average=True, val_range=None, normalize=False):
device = img1.device
weights = torch.FloatTensor([0.0448, 0.2856, 0.3001, 0.2363, 0.1333]).to(device)
levels = weights.size()[0]
mssim = []
mcs = []
for _ in range(levels):
sim, cs = ssim(img1, img2, window_size=window_size, size_average=size_average, full=True, val_range=val_range)
mssim.append(sim)
mcs.append(cs)
img1 = F.avg_pool2d(img1, (2, 2))
img2 = F.avg_pool2d(img2, (2, 2))
mssim = torch.stack(mssim)
mcs = torch.stack(mcs)
# Normalize (to avoid NaNs during training unstable models, not compliant with original definition)
if normalize:
mssim = (mssim + 1) / 2
mcs = (mcs + 1) / 2
pow1 = mcs ** weights
pow2 = mssim ** weights
# From Matlab implementation https://ece.uwaterloo.ca/~z70wang/research/iwssim/
output = torch.prod(pow1[:-1] * pow2[-1])
return output
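# Hedged usage sketch (not part of the original module): both functions expect NCHW tensors;
# the random images and sizes below are assumptions for illustration only.
def _example_ssim_usage():
    img1 = torch.rand(1, 3, 64, 64)
    img2 = torch.rand(1, 3, 64, 64)
    single_scale = ssim(img1, img2, window_size=11)                   # scalar similarity
    multi_scale = msssim(img1, img2, window_size=11, normalize=True)  # weighted product over 5 scales
    return single_scale, multi_scale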
# Classes to re-use window
class SSIM(torch.nn.Module):
def __init__(self, window_size=11, size_average=True, val_range=None):
super(SSIM, self).__init__()
self.window_size = window_size
self.size_average = size_average
self.val_range = val_range
# Assume 1 channel for SSIM
self.channel = 1
self.window = create_window(window_size)
def forward(self, img1, img2):
(_, channel, _, _) = img1.size()
if channel == self.channel and self.window.dtype == img1.dtype:
window = self.window
else:
window = create_window(self.window_size, channel).to(img1.device).type(img1.dtype)
self.window = window
self.channel = channel
return ssim(img1, img2, window=window, window_size=self.window_size, size_average=self.size_average)
class MSSSIM(torch.nn.Module):
def __init__(self, window_size=11, size_average=True, channel=3):
super(MSSSIM, self).__init__()
self.window_size = window_size
self.size_average = size_average
self.channel = channel
def forward(self, img1, img2):
# TODO: store window between calls if possible
return msssim(img1, img2, window_size=self.window_size, size_average=self.size_average)
class MeanShift(nn.Conv2d):
def __init__(self, rgb_range, rgb_mean, rgb_std, sign=-1):
super(MeanShift, self).__init__(3, 3, kernel_size=1)
std = torch.Tensor(rgb_std)
self.weight.data = torch.eye(3).view(3, 3, 1, 1)
self.weight.data.div_(std.view(3, 1, 1, 1))
self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean)
self.bias.data.div_(std)
self.requires_grad = False
class VGG(torch.nn.Module):
def __init__(self, conv_index, rgb_range=1):
super(VGG, self).__init__()
vgg_features = models.vgg19(pretrained=True).features
modules = [m for m in vgg_features]
if conv_index == '22':
self.vgg = nn.Sequential(*modules[:8])
elif conv_index == '54':
self.vgg = nn.Sequential(*modules[:35])
vgg_mean = (0.485, 0.456, 0.406)
vgg_std = (0.229 * rgb_range, 0.224 * rgb_range, 0.225 * rgb_range)
self.sub_mean = MeanShift(rgb_range, vgg_mean, vgg_std)
self.vgg.requires_grad = False
def forward(self, sr, hr):
def _forward(x):
x = self.sub_mean(x)
x = self.vgg(x)
return x
vgg_sr = _forward(sr)
with torch.no_grad():
vgg_hr = _forward(hr.detach())
loss = F.l1_loss(vgg_sr, vgg_hr)
return loss
def color_loss(out, target):
out_yuv = rgb_to_yuv(out)
out_u = out_yuv[:, 1, :, :]
out_v = out_yuv[:, 2, :, :]
target_yuv = rgb_to_yuv(target)
target_u = target_yuv[:, 1, :, :]
target_v = target_yuv[:, 2, :, :]
return torch.div(torch.mean((out_u - target_u).pow(1)).abs() + torch.mean((out_v - target_v).pow(1)).abs(), 2)
class BurstLoss(_Loss):
def __init__(self, size_average=None, reduce=None, reduction='mean'):
super(BurstLoss, self).__init__(size_average, reduce, reduction)
self.reduction = reduction
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
prewitt_filter = 1 / 6 * np.array([[1, 0, -1],
[1, 0, -1],
[1, 0, -1]])
self.prewitt_filter_horizontal = torch.nn.Conv2d(in_channels=1, out_channels=1,
kernel_size=prewitt_filter.shape,
padding=prewitt_filter.shape[0] // 2).to(device)
self.prewitt_filter_horizontal.weight.data.copy_(torch.from_numpy(prewitt_filter).to(device))
self.prewitt_filter_horizontal.bias.data.copy_(torch.from_numpy(np.array([0.0])).to(device))
import numpy as np
from os import path
def calcZScore(data):
# # of std deviation away from mean
mn = np.mean(data)
sd = np.std(data)
return (data - mn) / sd
#!/usr/bin/env python3
import rawpy
import imageio
import numpy as np
import scipy.stats as stats
from scipy.optimize import minimize_scalar
from astropy.io import fits
import argparse
import os.path
def weighted_var(values, weights):
"""
Return the weighted variance.
values, weights -- Numpy ndarrays with the same shape.
"""
average = np.average(values, weights=weights)
# Fast and numerically precise:
variance = np.average((values - average)**2, weights=weights)
return variance
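# Hedged worked example (not part of the original script): with values [1, 2, 3] and
# weights [1, 1, 2], the weighted mean is 2.25 and the weighted variance is 0.6875.
# weighted_var(np.array([1.0, 2.0, 3.0]), np.array([1.0, 1.0, 2.0]))  # -> 0.6875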
def optimize_dark_numerical(light, dark):
# try numerical optimization
fun = lambda alpha: weighted_var(np.round(np.clip((light - alpha*dark).flatten(), 0, np.inf)),
dark.flatten())
r = minimize_scalar(fun, [0, 2])
if r.success:
print(f"Successful optimization.")
else:
print("Optimization not successful.")
print(r)
return r.x
def optimize_dark_fast(light, dark):
""" Find optimal dark frame scaling by minimizing
global dark current-weighted image variance.
"""
light, dark = light.flatten(), dark.flatten()
light_avg = np.average(light, weights=dark)
dark_avg = np.average(dark, weights=dark)
'''
Functions that help visualize results
'''
import os
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import numpy as np
from . import config
__all__ = ['plotter',
'segment_plotter',
'plot_poincare',
'plot_breathing']
def plotter(working_data, measures, show=True, figsize=None,
title='Heart Rate Signal Peak Detection', moving_average=False): # pragma: no cover
'''plots the analysis results.
Function that uses calculated measures and data stored in the working_data{} and measures{}
dict objects to visualise the fitted peak detection solution.
Parameters
----------
working_data : dict
dictionary object that contains all heartpy's working data (temp) objects.
will be created if not passed to function
measures : dict
dictionary object used by heartpy to store computed measures. Will be created
if not passed to function
show : bool
when False, function will return a plot object rather than display the results.
default : True
figsize: tuple
Set dimensions of image in inches like in matplotlib. figsize=(x, y)
default: None => (6.4, 4.8)
title : string
title for the plot.
default : "Heart Rate Signal Peak Detection"
moving_average : bool
whether to display the moving average on the plot.
The moving average is used for peak fitting.
default: False
Returns
-------
out : matplotlib plot object
only returned if show == False.
Examples
--------
First let's load and analyse some data to visualise
>>> import heartpy as hp
>>> data, _ = hp.load_exampledata(0)
>>> wd, m = hp.process(data, 100.0)
Then we can visualise
>>> plot_object = plotter(wd, m, show=False, title='some awesome title')
This returns a plot object which can be visualized or saved or appended.
See matplotlib API for more information on how to do this.
A matplotlib plotting object is returned. This can be further processed and saved
to a file.
'''
#get color palette
colorpalette = config.get_colorpalette_plotter()
# create plot x-var
fs = working_data['sample_rate']
plotx = np.arange(0, len(working_data['hr'])/fs, 1/fs)
#check if there's a rounding error causing differing lengths of plotx and signal
diff = len(plotx) - len(working_data['hr'])
if diff < 0:
#add to linspace
plotx = np.append(plotx, plotx[-1] + (plotx[-1] - plotx[-2]))
elif diff > 0:
#trim linspace
plotx = plotx[0:-diff]
peaklist = working_data['peaklist']
ybeat = working_data['ybeat']
rejectedpeaks = working_data['removed_beats']
rejectedpeaks_y = working_data['removed_beats_y']
fig, ax = plt.subplots(figsize=figsize)
ax.set_title(title)
ax.plot(plotx, working_data['hr'], color=colorpalette[0], label='heart rate signal', zorder=-10)
ax.set_xlabel('Time (s)')
if moving_average:
ax.plot(plotx, working_data['rolling_mean'], color='gray', alpha=0.5)
ax.scatter(np.asarray(peaklist)/fs, ybeat, color=colorpalette[1], label='BPM:%.2f' %(measures['bpm']))
ax.scatter(rejectedpeaks/fs, rejectedpeaks_y, color=colorpalette[2], label='rejected peaks')
#check if rejected segment detection is on and has rejected segments
try:
if len(working_data['rejected_segments']) >= 1:
for segment in working_data['rejected_segments']:
ax.axvspan(segment[0], segment[1], facecolor='red', alpha=0.5)
except:
pass
ax.legend(loc=4, framealpha=0.6)
if show:
fig.show()
else:
return fig
def segment_plotter(working_data, measures, title='Heart Rate Signal Peak Detection',
figsize=(6, 6), path='', start=0, end=None, step=1): # pragma: no cover
'''plots analysis results
Function that plots the results of segmentwise processing of heart rate signal
and writes all results to separate files at the path provided.
Parameters
----------
working_data : dict
dictionary object that contains all heartpy's working data (temp) objects.
will be created if not passed to function
measures : dict
dictionary object used by heartpy to store computed measures. Will be created
if not passed to function
title : str
the title used in the plot
figsize : tuple
figsize tuple to be passed to matplotlib
path : str
the path where the files will be stored, folder must exist.
start : int
what segment to start plotting with
default : 0
end : int
last segment to plot. Must be smaller than total number of segments
default : None, will plot until end
step : int
stepsize used when iterating over plots; every step'th segment will be plotted
default : 1
Returns
-------
None
Examples
--------
This function has no examples. See documentation of heartpy for more info.
'''
#sanity check
assert 0 < step < len(working_data['hr']), 'step must be larger than zero and smaller than total number of segments'
#set endpoint if not explicitly defined
if end == None:
end = len(working_data['hr'])
else:
#make sure it is defined within boundary conditions
assert end <= len(working_data['hr']), 'defined "end" endpoint is larger than number of segments'
#add trailing path slash if user omitted it
if not (path.endswith('/') or path.endswith('\\')) and len(path) > 0:
path += '/'
#create path if it doesn't exist
if not os.path.isdir(path):
os.makedirs(path)
#make plots
filenum = 0
for i in range(start, end, step):
wd_segment = {}
m_segment = {}
#assign values to sub-object for plotting purposes
wd_segment['peaklist'] = working_data['peaklist'][i]
wd_segment['ybeat'] = working_data['ybeat'][i]
wd_segment['removed_beats'] = working_data['removed_beats'][i]
wd_segment['removed_beats_y'] = working_data['removed_beats_y'][i]
wd_segment['hr'] = working_data['hr'][i]
wd_segment['rolling_mean'] = working_data['rolling_mean'][i]
wd_segment['sample_rate'] = working_data['sample_rate'][i]
m_segment['bpm'] = measures['bpm'][i]
try:
wd_segment['rejected_segments'] = working_data['rejected_segments'][i]
except:
pass
#plot it using built-in plotter
plt.figure(figsize = figsize)
p = plotter(wd_segment, m_segment, show=False)
p.savefig('%s%i.png' %(path, filenum))
plt.close('all')
filenum += 1
def plot_poincare(working_data, measures, show = True, figsize=None,
title='Poincare plot'): # pragma: no cover
'''visualize poincare plot
function that visualises poincare plot.
Parameters
----------
working_data : dict
dictionary object that contains all heartpy's working data (temp) objects.
will be created if not passed to function
measures : dict
dictionary object used by heartpy to store computed measures. Will be created
if not passed to function
show : bool
whether to show the plot right away, or return a matplotlib object for
further manipulation
figsize: tuple
Set dimensions of image in inches like in matplotlib. figsize=(x, y)
default: None => (6.4, 4.8)
title : str
the title used in the plot
Returns
-------
out : matplotlib plot object
only returned if show == False.
Examples
--------
This function has no examples. See documentation of heartpy for more info.
'''
#get color palette
colorpalette = config.get_colorpalette_poincare()
#get values from dict
x_plus = working_data['poincare']['x_plus']
x_minus = working_data['poincare']['x_minus']
sd1 = measures['sd1']
sd2 = measures['sd2']
#define figure
fig, ax = plt.subplots(subplot_kw={'aspect': 'equal'}, figsize=figsize)
#plot scatter
ax.scatter(x_plus, x_minus, color = colorpalette[0],
alpha = 0.75, label = 'peak-peak intervals')
#plot identity line
mins = np.min([x_plus, x_minus])
maxs = np.max([x_plus, x_minus])
identity_line = np.linspace(np.min(mins), np.max(maxs))
ax.plot(identity_line, identity_line, color='black', alpha=0.5,
label = 'identity line')
#rotate SD1, SD2 vectors 45 degrees counterclockwise
sd1_xrot, sd1_yrot = rotate_vec(0, sd1, 45)
sd2_xrot, sd2_yrot = rotate_vec(0, sd2, 45)
#plot rotated SD1, SD2 lines
ax.plot([np.mean(x_plus), np.mean(x_plus) + sd1_xrot],
[np.mean(x_minus), np.mean(x_minus) + sd1_yrot],
color = colorpalette[1], label = 'SD1')
ax.plot([np.mean(x_plus), np.mean(x_plus) - sd2_xrot],
[np.mean(x_minus), np.mean(x_minus) + sd2_yrot],
color = colorpalette[2], label = 'SD2')
#plot ellipse
xmn = np.mean(x_plus)
ymn = np.mean(x_minus)
el = Ellipse((xmn, ymn), width = sd2 * 2, height = sd1 * 2, angle = 45.0)
ax.add_artist(el)
el.set_edgecolor((0,0,0))
el.fill = False
ax.set_xlabel(r'RRi$_n$ (ms)')
ax.set_ylabel(r'RRi$_{n+1}$ (ms)')
ax.legend(loc=4, framealpha=0.6)
ax.set_title(title)
if show:
fig.show()
else:
return fig
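#A hedged usage sketch for plot_poincare(). It assumes hp.process() was run so
#that working_data carries the 'poincare' sub-dict and measures carries sd1/sd2,
#which is exactly what the function reads above; the loader and sample rate are
#illustrative assumptions.
def _poincare_example(): # pragma: no cover
    import heartpy as hp
    data, _ = hp.load_exampledata(0)
    wd, m = hp.process(data, sample_rate=100.0)
    fig = plot_poincare(wd, m, show=False, figsize=(6, 6))
    fig.savefig('poincare.png')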
def rotate_vec(x, y, angle):
'''rotates vector around origin point
Function that takes vector and angle, and rotates around origin point
by the given number of degrees.
Helper function for poincare plotting
Parameters
----------
x : int or float
vector x coordinate
y : int or float
vector y coordinate
angle: int or float
the angle of rotation applied to the vector
Returns
-------
x_rot : float
new x coordinate with rotation applied
y_rot : float
new y coordinate with rotation applied
Examples
--------
Given a vector (0,1), if we apply a rotation of 90 degrees clockwise
we expect to get (1,0). Let's test
>>> x_new, y_new = rotate_vec(0, 1, -90)
>>> print('%.3f, %.3f' %(x_new, y_new))
1.000, 0.000
'''
theta = np.radians(angle)
cs = np.cos(theta)
sn = np.sin(theta)
#standard counterclockwise rotation about the origin (matches the doctest above)
x_rot = (x * cs) - (y * sn)
y_rot = (x * sn) + (y * cs)
return x_rot, y_rot
import numpy as np
class BinaryCriticalPoints(object):
"""
Class for finding critical points for binary phase
diagrams.
"""
def __init__(self):
pass
def coexistence_points(self, phase1, phase2):
"""
Find coexistence points.
:param np.ndarray phase1: Second order polynomial for phase1
:param np.ndarray phase2: Second order polynomial for phase2
"""
delta_a = phase2[2] - phase1[2]
delta_b = phase2[1] - phase1[1]
c1 = phase1[0]
c2 = phase2[0]
# Calculate points that enters in second order
# equation (x - B)**2 = C
B = 0.5*c2*delta_b/(c1*c2 - c2**2)
C = (4*c1*delta_a + delta_b**2)/(4*c1*c2 - 4*c2**2)
x2_minus = B - np.sqrt(B**2 + C)
x1_minus = 0.5*(delta_b + 2*c2*x2_minus)/c1
x2_pluss = B + np.sqrt(B**2 + C)
x1_pluss = 0.5*(delta_b + 2*c2*x2_pluss)/c1
if self._in_interval(x1_minus) and self._in_interval(x2_minus):
x1 = x1_minus
x2 = x2_minus
elif self._in_interval(x1_pluss) and self._in_interval(x2_pluss):
x1 = x1_pluss
x2 = x2_pluss
else:
raise ValueError("Did not find any co-existence point!")
return x1, x2
def _in_interval(self, x):
return x > 0.0 and x < 1.0
def spinodal(self, phase):
"""
Find the spinodal line
"""
double_deriv = np.polyder(phase, m=2)
roots = np.roots(double_deriv)
return np.real(roots[~np.iscomplex(roots)])
def plot(self, x, y, polys=[]):
from matplotlib import pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(x, y)
for p in polys:
ax.plot(x, np.polyval(p, x))
ax.set_xlabel("Concentration")
ax.set_ylabel("Free energy")
y_range = np.max(y)
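#A hedged usage sketch for BinaryCriticalPoints. The quadratic coefficients below
#(highest power first, as np.polyder/np.roots expect) are made-up free-energy fits:
#two parabolas with minima of equal depth at x=0.2 and x=0.8, so the common
#tangent gives coexistence points 0.2 and 0.8; the quartic only serves to give the
#spinodal a real pair of roots.
def _critical_points_example():
    bcp = BinaryCriticalPoints()
    phase1 = np.array([1.0, -0.4, 0.04])   # (x - 0.2)^2
    phase2 = np.array([2.0, -3.2, 1.28])   # 2*(x - 0.8)^2
    x1, x2 = bcp.coexistence_points(phase1, phase2)   # -> (0.2, 0.8)
    spin = bcp.spinodal(np.array([1.0, -2.0, 0.9, 0.0, 0.0]))
    return x1, x2, spin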
import time, sys, os
import dill as pickle
import numpy as np
import scipy.constants as constants
import scipy.interpolate as interp
import scipy.optimize as opti
import scipy.signal as signal
import numdifftools as ndt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import grav_util_3 as gu
import bead_util as bu
import configuration as config
import warnings
warnings.filterwarnings("ignore")
theory_data_dir = '/data/grav_sim_data/2um_spacing_data/'
patches_base_path = '/processed_data/comsol_data/patch_potentials/'
patches_name = 'patch_pot_2um_0Vrms_bias-1Vdc'
# Include some legacy grav data to compare to later
data_dirs = [#'/data/20180625/bead1/grav_data/shield/X50-75um_Z15-25um_17Hz', \
#\
#'/data/20180704/bead1/grav_data/shield', \
#\
#'/data/20180808/bead4/grav_data/shield1' \
#\
'/data/20180827/bead2/500e_data/dipole_v_height_ac' \
#'/data/20180904/bead1/cant_force/ac_cant_elec_force_10s', \
#'/data/20180904/bead1/recharged_20180909/cant_force/acgrid_3freqs_10s'
]
load_files = False
p0_bead_dict = {'20180625': [19.0, 40.0, 20.0], \
'20180704': [18.7, 40.0, 20.0], \
'20180808': [18.0, 40.0, 20.0], \
'20180827': [35.0 ,40.0, 15.0]
}
p0_bead_dict = {'20180827': [0.0 ,0.0, 0.0], \
'20180904': [0.0 ,0.0, 0.0]
}
opt_ext = '_electrostatics'
harms = [1]
#harms = [1,2,3,4,5]
charge = 430 * constants.elementary_charge * (-1.0)
plot_field_test = False
plot_cost_function = False
plot_keyscale = 1.0e-13
maxfreq = 200
dim3 = False
unity_errors = False
init = [15.0, 0.0, 28.0, -50]
init_rot = [15.0, 0.0, 28.0, -50, 0.0, 0.0, 0.0]
############################################################
############################################################
xx = np.load(open(patches_base_path + patches_name + '.xx', 'rb'))
yy = np.load(open(patches_base_path + patches_name + '.yy', 'rb'))
zz = np.load(open(patches_base_path + patches_name + '.zz', 'rb'))
dx = xx[1] - xx[0]
dy = yy[1] - yy[0]
dz = zz[1] - zz[0]
field = np.load(open(patches_base_path + patches_name + '.field', 'rb'))
potential = np.load(open(patches_base_path + patches_name + '.potential', 'rb'))
print(field[0].shape)
gradE = []
gradE_func = []
for resp in [0,1,2]:
gradE.append( np.gradient(field[resp], dx, dy, dz)[resp] )
gradE_func.append( interp.RegularGridInterpolator((xx, yy, zz), gradE[resp], \
bounds_error=False, \
fill_value=None) )
pot_func = interp.RegularGridInterpolator((xx, yy, zz), potential, \
bounds_error=False, fill_value=None)
field_func = []
for resp in [0,1,2]:
field_func.append( interp.RegularGridInterpolator((xx, yy, zz), field[resp], \
bounds_error=False, fill_value=None) )
if plot_field_test:
posvec = np.linspace(-20e-6, 20e-6, 101)
posvec = np.linspace(-5e-6, 100e-6, 106)
ones = np.ones_like(posvec)
xval = 20.0e-6
yval = 0.0e-6
zval = 0.0e-6
eval_pts = np.stack((posvec, yval*ones, zval*ones), axis=-1)
#eval_pts = np.stack((xval*ones, posvec, zval*ones), axis=-1)
#eval_pts = np.stack((xval*ones, yval*ones, posvec), axis=-1)
ann_str = 'Sep: %0.2f um, Height: %0.2f um' % (xval*1e6, zval*1e6)
plt.figure()
plt.plot(posvec*1e6, pot_func(eval_pts))
plt.figure(figsize=(7,5))
#plt.title(name)
plt.plot(posvec*1e6, field_func[0](eval_pts)*charge, label='fx')
plt.plot(posvec*1e6, field_func[1](eval_pts)*charge, label='fy')
plt.plot(posvec*1e6, field_func[2](eval_pts)*charge, label='fz')
plt.legend()
plt.xlabel('Displacement Along Attractor [um]')
plt.ylabel('Force on 500e$^-$ [N]')
plt.annotate(ann_str, xy=(0.2, 0.9), xycoords='axes fraction')
plt.tight_layout()
plt.grid()
plt.show()
############################################################
############################################################
############################################################
############################################################
for ddir in data_dirs:
paths = gu.build_paths(ddir, opt_ext=opt_ext)
datafiles = bu.find_all_fnames(ddir)
p0_bead = p0_bead_dict[paths['date']]
if load_files:
agg_dat = gu.AggregateData(datafiles, p0_bead=p0_bead, harms=harms, \
elec_drive=True, elec_ind=0, maxfreq=maxfreq, \
plot_harm_extraction=False, dim3=dim3)
agg_dat.save(paths['agg_path'])
else:
agg_dat = gu.AggregateData([], p0_bead=p0_bead, harms=harms, dim3=dim3)
agg_dat.load(paths['agg_path'])
agg_dat.bin_rough_stage_positions(ax_disc=1.0, dim3=dim3)
agg_dat.handle_sparse_binning(dim3=dim3, verbose=False)
#print 'Testing sparse handling...',
#agg_dat.handle_sparse_binning(dim3=dim3, verbose=True)
#print 'Done!'
#raw_input()
#print agg_dat.ax0vec
#print agg_dat.ax1vec
#print agg_dat.ax2vec
#print len(agg_dat.file_data_objs)
force_plane_dict = agg_dat.get_vector_force_plane(plot_resp=(0,2), fig_ind=1, \
plot=True, show=False, \
sign=[1.0, 1.0, 1.0], dim3=dim3, \
keyscale=plot_keyscale)
force_plane_dict_2 = agg_dat.get_vector_force_plane(plot_resp=(1,2), fig_ind=2, \
plot=True, show=True, \
sign=[1.0, 1.0, 1.0], dim3=dim3, \
keyscale=plot_keyscale)
err_dict = {0: 'xerr', 1: 'yerr', 2: 'zerr'}
dat = [[], [], []]
err = [[], [], []]
for resp in [0,1,2]:
dat[resp] = force_plane_dict[resp]
err[resp] = force_plane_dict[err_dict[resp]]
dat = np.array(dat)
err = np.array(err)
#for i in range(10):
# print force_plane_dict['drive'][i,:,:]
# raw_input()
volt_drive = np.mean(force_plane_dict['drive'])
print('Voltage Drive: ', volt_drive)
#raw_input()
hist_scale_fac = np.std(dat) * 0.1
scale_fac = 1.0
dat_sc = dat * (1.0 / scale_fac)
err_sc = err * (1.0 / scale_fac)
if unity_errors:
err_sc = err_sc - err_sc + 1.0
pos_dict = agg_dat.make_ax_arrs(dim3=dim3)
seps = pos_dict['seps']
heights = pos_dict['heights']
if dim3:
yposvec = pos_dict['ypos']
seps_g, ypos_g, heights_g = np.meshgrid(seps, yposvec, heights, indexing='ij')
else:
yposvec = np.array([0.0])
seps_g, heights_g = np.meshgrid(seps, heights, indexing='ij')
rot_point = []
def F_comsol_func(sep_off, ypos, height_off, charge, eval_resp=0, \
rot_angles=[0.0, 0.0, 0.0], rot_point=[], \
radians=False, plot_rot=False, \
add_dipole=False, dipole_moment=0.0, \
dim3=False):
if dim3:
interp_mesh = np.array([(seps_g + sep_off) * 1.0e-6, \
(ypos_g + ypos) * 1.0e-6, \
(heights_g + height_off) * 1.0e-6])
else:
interp_mesh = np.array(np.meshgrid((seps+sep_off)*1e-6, (yposvec+ypos)*1e-6,
(heights+height_off)*1e-6, indexing='ij'))
interp_points = np.rollaxis(interp_mesh, 0, 4)
interp_points = interp_points.reshape((interp_mesh.size // 3, 3))
npts = interp_points.shape[0]
rot_matrix = bu.euler_rotation_matrix(rot_angles, radians=radians)
if not len(rot_point):
p0 = []
for resp in [0,1,2]:
p0.append( np.mean(interp_points[:,resp]))
else:
p0 = rot_point
p0 = np.array(p0)
rot_pts = []
for resp in [0,1,2]:
rot_pts_vec = np.zeros(npts)
for resp2 in [0,1,2]:
rot_pts_vec += rot_matrix[resp,resp2] * (interp_points[:,resp2] - p0[resp2])
rot_pts.append(rot_pts_vec)
rot_pts_vec += p0[resp]
rot_pts = np.array(rot_pts)
rot_pts = rot_pts.T
if plot_rot:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(interp_points[:,0]*1e6, interp_points[:,1]*1e6, \
interp_points[:,2]*1e6, label='Original')
ax.scatter(rot_pts[:,0]*1e6, rot_pts[:,1]*1e6, rot_pts[:,2]*1e6, \
label='Rot')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show()
field_res = interp.interpn((xx, yy, zz), field[eval_resp], rot_pts, \
bounds_error=False, fill_value=None)
if dim3:
shaped = np.reshape(field_res, (len(seps), len(yposvec), len(heights)))
else:
shaped = np.reshape(field_res, (len(seps), len(heights)))
out = shaped * charge * constants.elementary_charge
return out #* volt_drive
'''
Fcomsol = []
for resp in [0,1,2]:
Fcomsol.append(F_comsol_func(15, 0, 28, -50, eval_resp=resp, \
rot_angles=[0.0, 0.0, 0.0], rot_point=[], \
radians=False, plot_rot=False, \
add_dipole=False, dipole_moment=0.0, \
dim3=False))
fig = plt.figure(7)
ax = fig.add_subplot(111)
qdat = ax.quiver(seps_g, heights_g, dat[0], dat[2], \
color='k', pivot='mid', label='Force', scale=plot_keyscale*4)
qcom = ax.quiver(seps_g, heights_g, Fcomsol[0], Fcomsol[2], \
color='r', pivot='mid', label='Error', scale=plot_keyscale*4)
ax.set_xlabel('Separation [um]')
ax.set_ylabel('Height [um]')
plt.show()
'''
def cost_function(params, Ndof=True):
delta_sep, ypos, delta_height, charge = params
cost = 0.0
N = 0
for resp in [0,1,2]:
func_vals = F_comsol_func(delta_sep, ypos, delta_height, charge, eval_resp=resp, \
dim3=dim3)
func_vals *= (1.0 / scale_fac)
diff_sq = np.abs( dat_sc[resp] - func_vals )**2
#var = (np.ones_like(func_vals))**2
var = (err_sc[resp])**2
cost += np.sum( diff_sq / var )
N += diff_sq.size
if Ndof:
cost *= (1.0 / float(N))
return 0.5 * cost
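# Minimal numeric sketch of the reduced chi-square form computed by cost_function()
# above: 0.5 * (1/N) * sum((data - model)**2 / err**2). The arrays are made-up
# numbers, only meant to show the shape of the computation.
def _chi2_sketch():
    data = np.array([1.0, 2.0, 3.0])
    model = np.array([1.1, 1.9, 3.2])
    err = np.array([0.1, 0.1, 0.2])
    chi2 = np.sum((data - model)**2 / err**2)
    return 0.5 * chi2 / data.size   # same Ndof normalization as cost_function()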
def cost_function_rot(params, Ndof=True):
delta_sep, ypos, delta_height, charge, rotx, roty, rotz = params
cost = 0.0
N = 0
for resp in [0,1,2]:
func_vals = F_comsol_func(delta_sep, ypos, delta_height, charge, \
rot_angles=[rotx, roty, rotz], rot_point=rot_point, \
radians=False, eval_resp=resp, \
dim3=dim3)
func_vals *= (1.0 / scale_fac)
diff_sq = np.abs( dat_sc[resp] - func_vals )**2
#var = (np.ones_like(func_vals))**2
var = (err_sc[resp])**2
cost += np.sum( diff_sq / var )
N += diff_sq.size
if Ndof:
cost *= (1.0 / float(N))
window = signal.tukey(91,0.8)
angles = np.linspace(-45,45,91)
penalty_box = interp.interp1d(angles, 1.0-window, bounds_error=False, fill_value=1.0)
ang_penalty = 1.0e5 * ( penalty_box(rotx) + penalty_box(roty) + penalty_box(rotz))
return 0.5 * cost + ang_penalty
def diff_function(params, axes=[0,1,2]):
delta_sep, ypos, delta_height, charge = params
diff = []
for resp in [0,1,2]:
if resp not in axes:
continue
func_vals = F_comsol_func(delta_sep, ypos, delta_height, charge, eval_resp=resp, \
dim3=dim3)
func_vals *= (1.0 / scale_fac)
diff += list( ((dat_sc[resp] - func_vals) / err_sc[resp]).flatten() )
return np.array(diff)
def diff_function_rot(params, axes=[0,1,2], no_penalty=False):
delta_sep, ypos, delta_height, charge, rotx, roty, rotz = params
#rotz = 0
diff = []
for resp in [0,1,2]:
if resp not in axes:
continue
func_vals = F_comsol_func(delta_sep, ypos, delta_height, charge, \
rot_angles=[rotx, roty, rotz], \
rot_point=rot_point,\
radians=False, eval_resp=resp, \
dim3=dim3)
func_vals *= (1.0 / scale_fac)
diff += list( ((func_vals - dat_sc[resp]) / err_sc[resp]).flatten() )
window = signal.tukey(91,0.8)
angles = np.linspace(-45,45,91)
penalty_box = interp.interp1d(angles, 2.5*(1.0-window) + 1, bounds_error=False, fill_value=1.0)
ang_penalty = ( penalty_box(rotx) + penalty_box(roty) + penalty_box(rotz))
ypenalty = 0
if no_penalty:
ang_penalty = 1.0
return np.array(diff) * ang_penalty
if plot_cost_function:
test = [np.linspace(5.0, 25.0, 101)]
import PIL.Image as Image
import array
import h5py
import cv2 as cv
from scipy.ndimage.filters import uniform_filter
import pcl
import numpy as np
import time
import _pickle as cPickle
import copy
import math
def hdf2affimg(filename):
"""
Convert an HDF5 file (output of the affordance map network in Lua) to an image.
Note: the raw network output must be resized to the 512 x 424 sensor resolution.
"""
h = h5py.File(filename,'r')
res = h['results']
res = np.array(res)
res = res[0,1,:,:]
resresize = cv.resize(res, (512, 424), interpolation=cv.INTER_CUBIC)
resresize[np.where(resresize>1)] = 0.9999
resresize[np.where(resresize<0)] = 0
return resresize
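#A hedged sketch of the clip/resize step hdf2affimg() performs, using a synthetic
#network output instead of reading an HDF5 file (the 240x320 shape is an assumed
#example of what the network might emit).
def _affordance_resize_example():
    res = np.random.randn(240, 320).astype(np.float32)
    resized = cv.resize(res, (512, 424), interpolation=cv.INTER_CUBIC)
    resized[np.where(resized > 1)] = 0.9999
    resized[np.where(resized < 0)] = 0
    return resized.shape   # (424, 512): cv.resize takes (width, height)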
def get_patch(location_2d, cur_color, cur_depth, post_afford, size=(128,128)):
"""
Crop a 128*128 window around location_2d from the post-processed affordance
map and the RGB-D frames; returns the channels-first RGB-D patch (resized by
1/4, i.e. 4*32*32), the affordance patch and the location.
"""
# Normalization of input
cur_color = cur_color / 255.
cur_depth = cur_depth / 65535.
y = location_2d[0]
x = location_2d[1]
r = int(size[0]/2)
patch_color = cur_color[y-r:y+r,x-r:x+r,:]
patch_depth = cur_depth[y-r:y+r,x-r:x+r]
patch_afford = post_afford[y-r:y+r,x-r:x+r]
patch_rgbd = np.zeros((size[0], size[1], 4))
patch_rgbd[:,:,0:3] = patch_color
patch_rgbd[:,:,3] = patch_depth
# Resize the patch for a smaller action space
patch_size = int(size[0]/4)
patch_rgbd = cv.resize(patch_rgbd, dsize=(patch_size, patch_size), interpolation=cv.INTER_CUBIC)
return np.transpose(patch_rgbd, (2,0,1)), patch_afford, location_2d
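#A hedged sketch of calling get_patch() with synthetic inputs. Real callers pass a
#Kinect-style 512x424 colour/depth pair plus the post-processed affordance map;
#the random arrays below only illustrate the expected shapes and the channels-first
#patch returned for the default 128x128 window.
def _get_patch_example():
    cur_color = np.random.randint(0, 255, (424, 512, 3)).astype(np.float32)
    cur_depth = np.random.randint(0, 65535, (424, 512)).astype(np.float32)
    post_afford = np.random.rand(424, 512)
    patch_rgbd, patch_afford, loc = get_patch((212, 256), cur_color, cur_depth, post_afford)
    return patch_rgbd.shape   # (4, 32, 32): RGB-D, resized to a quarter of the window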
def postproc_affimg(cur_color, cur_depth, cur_afford, bg_color, bg_depth, intri, border_pos):
"""
Post-process the affordance map.
Ported to Python from the original MATLAB afford_model implementation.
"""
cur_color = cur_color / 255.0
cur_depth = cur_depth / 10000
temp = (np.abs(cur_color -bg_color) < 0.3)
foregroundMaskColor = np.sum(temp, axis = 2) != 3
backgroundDepth_mask = np.zeros(bg_depth.shape, dtype = bool)
backgroundDepth_mask[bg_depth!=0] = True
foregroundMaskDepth = backgroundDepth_mask & (np.abs(cur_depth-bg_depth) > 0.005)
foregroundMask = (foregroundMaskColor | foregroundMaskDepth)
x = np.linspace(0,512-1,512)
y = np.linspace(0,424-1,424)
pixX,pixY = np.meshgrid(x,y)
camX = (pixX-intri[0,2])*cur_depth/intri[0,0]
camY = (pixY-intri[1,2])*cur_depth/intri[1,1]
camZ = cur_depth
validDepth = foregroundMask & (camZ != 0) # only points with valid depth and within foreground mask
inputPoints = [camX[validDepth],camY[validDepth],camZ[validDepth]]
inputPoints = np.asarray(inputPoints,dtype=np.float32)
foregroundPointcloud = pcl.PointCloud(np.transpose(inputPoints))
foregroundNormals = _surface_normals(foregroundPointcloud)
tmp = np.zeros((foregroundNormals.size, 4), dtype=np.float16)
for i in range(foregroundNormals.size):
tmp[i] = foregroundNormals[i]
foregroundNormals = np.nan_to_num(tmp[:,0:3])
pixX = np.rint(np.dot(inputPoints[0,:],intri[0,0])/inputPoints[2,:]+intri[0,2])
pixY = np.rint(np.dot(inputPoints[1,:],intri[1,1])/inputPoints[2,:]+intri[1,2])
surfaceNormalsMap = np.zeros(cur_color.shape)
arraySize = surfaceNormalsMap.shape
pixX = pixX.astype(int)  #np.int alias was removed in NumPy 1.24; builtin int is equivalent
pixY = pixY.astype(int)
tmp = np.ones(pixY.shape)
import numpy as np
import sys, os
sys.path.append(os.pardir)
from activation import *
from layers import *
from loss import *
from collections import OrderedDict
class TwoLayerNet:
def __init__(self, input_size, hidden_size, output_size, batch_norm=True, weight_decay=0.0, Prelu=False):
self.Prelu = Prelu
self.weight_decay = weight_decay
self.batch_norm = batch_norm
self.params = {}
self.params['W1'] = np.random.randn(input_size, hidden_size) / np.sqrt(input_size/2.0)
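# Hedged note: dividing by np.sqrt(input_size/2.0) above is He initialization
# (std = sqrt(2/fan_in)), appropriate for ReLU-family activations. A minimal,
# hypothetical sketch of the same scaling for a later layer (the rest of
# __init__ is not shown here):
def _he_init_sketch(fan_in=100, fan_out=10):
    return np.random.randn(fan_in, fan_out) / np.sqrt(fan_in / 2.0)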
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from unittest import TestCase, main
import warnings
import numpy as np
from skbio import Sequence, Protein, DNA, RNA, TabularMSA
from skbio.alignment import (
global_pairwise_align_protein, local_pairwise_align_protein,
global_pairwise_align_nucleotide, local_pairwise_align_nucleotide,
make_identity_substitution_matrix, local_pairwise_align,
global_pairwise_align)
from skbio.alignment._pairwise import (
_init_matrices_sw, _init_matrices_nw,
_compute_score_and_traceback_matrices, _traceback, _first_largest,
_compute_substitution_score)
from skbio.sequence import GrammaredSequence
from skbio.util import classproperty
from skbio.util._decorator import overrides
class CustomSequence(GrammaredSequence):
@classproperty
@overrides(GrammaredSequence)
def gap_chars(cls):
return set('^$')
@classproperty
@overrides(GrammaredSequence)
def default_gap_char(cls):
return '^'
@classproperty
@overrides(GrammaredSequence)
def definite_chars(cls):
return set('WXYZ')
@classproperty
@overrides(GrammaredSequence)
def degenerate_map(cls):
return {}
class PairwiseAlignmentTests(TestCase):
"""
Note: In the high-level tests, the expected results were derived with
assistance from the EMBOSS web server:
http://www.ebi.ac.uk/Tools/psa/emboss_needle/
http://www.ebi.ac.uk/Tools/psa/emboss_water/
In some cases, placement of non-gap characters surrounded by gap
characters is slightly different between scikit-bio and the EMBOSS
server. These differences arise from arbitrary implementation
differences, and always result in the same score (which tells us that
the alignments are equivalent). In cases where the expected results
included here differ from those generated by the EMBOSS server, I note
the EMBOSS result as a comment below the expected value.
"""
def setUp(self):
"""Ignore warnings during tests."""
warnings.simplefilter("ignore")
def tearDown(self):
"""Clear the list of warning filters, so that no filters are active."""
warnings.resetwarnings()
def test_make_identity_substitution_matrix(self):
expected = {'A': {'A': 1, 'C': -2, 'G': -2, 'T': -2, 'U': -2},
'C': {'A': -2, 'C': 1, 'G': -2, 'T': -2, 'U': -2},
'G': {'A': -2, 'C': -2, 'G': 1, 'T': -2, 'U': -2},
'T': {'A': -2, 'C': -2, 'G': -2, 'T': 1, 'U': -2},
'U': {'A': -2, 'C': -2, 'G': -2, 'T': -2, 'U': 1}}
self.assertEqual(make_identity_substitution_matrix(1, -2), expected)
expected = {'A': {'A': 5, 'C': -4, 'G': -4, 'T': -4, 'U': -4},
'C': {'A': -4, 'C': 5, 'G': -4, 'T': -4, 'U': -4},
'G': {'A': -4, 'C': -4, 'G': 5, 'T': -4, 'U': -4},
'T': {'A': -4, 'C': -4, 'G': -4, 'T': 5, 'U': -4},
'U': {'A': -4, 'C': -4, 'G': -4, 'T': -4, 'U': 5}}
self.assertEqual(make_identity_substitution_matrix(5, -4), expected)
# TODO: duplicate of test_global_pairwise_align_custom_alphabet, remove
# when nondegenerate_chars is removed
def test_global_pairwise_align_custom_alphabet_nondegenerate_chars(self):
custom_substitution_matrix = make_identity_substitution_matrix(
1, -1, alphabet=CustomSequence.nondegenerate_chars)
custom_msa, custom_score, custom_start_end = global_pairwise_align(
CustomSequence("WXYZ"), CustomSequence("WXYYZZ"),
10.0, 5.0, custom_substitution_matrix)
# Expected values computed by running an equivalent alignment using the
# DNA alphabet with the following mapping:
#
# W X Y Z
# | | | |
# A C G T
#
self.assertEqual(custom_msa, TabularMSA([CustomSequence('WXYZ^^'),
CustomSequence('WXYYZZ')]))
self.assertEqual(custom_score, 2.0)
self.assertEqual(custom_start_end, [(0, 3), (0, 5)])
def test_global_pairwise_align_custom_alphabet(self):
custom_substitution_matrix = make_identity_substitution_matrix(
1, -1, alphabet=CustomSequence.definite_chars)
custom_msa, custom_score, custom_start_end = global_pairwise_align(
CustomSequence("WXYZ"), CustomSequence("WXYYZZ"),
10.0, 5.0, custom_substitution_matrix)
# Expected values computed by running an equivalent alignment using the
# DNA alphabet with the following mapping:
#
# W X Y Z
# | | | |
# A C G T
#
self.assertEqual(custom_msa, TabularMSA([CustomSequence('WXYZ^^'),
CustomSequence('WXYYZZ')]))
self.assertEqual(custom_score, 2.0)
self.assertEqual(custom_start_end, [(0, 3), (0, 5)])
# TODO: duplicate of test_local_pairwise_align_custom_alphabet, remove
# when nondegenerate_chars is removed.
def test_local_pairwise_align_custom_alphabet_nondegenerate_chars(self):
custom_substitution_matrix = make_identity_substitution_matrix(
5, -4, alphabet=CustomSequence.nondegenerate_chars)
custom_msa, custom_score, custom_start_end = local_pairwise_align(
CustomSequence("YWXXZZYWXXWYYZWXX"),
CustomSequence("YWWXZZZYWXYZWWX"), 5.0, 0.5,
custom_substitution_matrix)
# Expected values computed by running an equivalent alignment using the
# DNA alphabet with the following mapping:
#
# W X Y Z
# | | | |
# A C G T
#
self.assertEqual(
custom_msa,
TabularMSA([CustomSequence('WXXZZYWXXWYYZWXX'),
CustomSequence('WXZZZYWX^^^YZWWX')]))
self.assertEqual(custom_score, 41.0)
self.assertEqual(custom_start_end, [(1, 16), (2, 14)])
def test_local_pairwise_align_custom_alphabet(self):
custom_substitution_matrix = make_identity_substitution_matrix(
5, -4, alphabet=CustomSequence.definite_chars)
custom_msa, custom_score, custom_start_end = local_pairwise_align(
CustomSequence("YWXXZZYWXXWYYZWXX"),
CustomSequence("YWWXZZZYWXYZWWX"), 5.0, 0.5,
custom_substitution_matrix)
# Expected values computed by running an equivalent alignment using the
# DNA alphabet with the following mapping:
#
# W X Y Z
# | | | |
# A C G T
#
self.assertEqual(
custom_msa,
TabularMSA([CustomSequence('WXXZZYWXXWYYZWXX'),
CustomSequence('WXZZZYWX^^^YZWWX')]))
self.assertEqual(custom_score, 41.0)
self.assertEqual(custom_start_end, [(1, 16), (2, 14)])
def test_global_pairwise_align_invalid_type(self):
with self.assertRaisesRegex(TypeError,
"GrammaredSequence.*"
"TabularMSA.*'Sequence'"):
global_pairwise_align(DNA('ACGT'), Sequence('ACGT'), 1.0, 1.0, {})
def test_global_pairwise_align_dtype_mismatch(self):
with self.assertRaisesRegex(TypeError,
"same dtype: 'DNA' != 'RNA'"):
global_pairwise_align(DNA('ACGT'), TabularMSA([RNA('ACGU')]),
1.0, 1.0, {})
with self.assertRaisesRegex(TypeError,
"same dtype: 'DNA' != 'RNA'"):
global_pairwise_align(TabularMSA([DNA('ACGT')]),
TabularMSA([RNA('ACGU')]),
1.0, 1.0, {})
def test_global_pairwise_align_protein(self):
obs_msa, obs_score, obs_start_end = global_pairwise_align_protein(
Protein("HEAGAWGHEE"), Protein("PAWHEAE"), gap_open_penalty=10.,
gap_extend_penalty=5.)
self.assertEqual(obs_msa, TabularMSA([Protein("HEAGAWGHEE-"),
Protein("---PAW-HEAE")]))
self.assertEqual(obs_score, 23.0)
self.assertEqual(obs_start_end, [(0, 9), (0, 6)])
# EMBOSS result: P---AW-HEAE
obs_msa, obs_score, obs_start_end = global_pairwise_align_protein(
Protein("HEAGAWGHEE"), Protein("PAWHEAE"), gap_open_penalty=5.,
gap_extend_penalty=0.5)
self.assertEqual(obs_msa, TabularMSA([Protein("HEAGAWGHE-E"),
Protein("---PAW-HEAE")]))
self.assertEqual(obs_score, 30.0)
self.assertEqual(obs_start_end, [(0, 9), (0, 6)])
# Protein sequences with metadata
obs_msa, obs_score, obs_start_end = global_pairwise_align_protein(
Protein("HEAGAWGHEE", metadata={'id': "s1"}),
Protein("PAWHEAE", metadata={'id': "s2"}),
gap_open_penalty=10., gap_extend_penalty=5.)
self.assertEqual(
obs_msa,
TabularMSA([Protein("HEAGAWGHEE-", metadata={'id': "s1"}),
Protein("---PAW-HEAE", metadata={'id': "s2"})]))
self.assertEqual(obs_score, 23.0)
self.assertEqual(obs_start_end, [(0, 9), (0, 6)])
# One TabularMSA and one Protein as input
obs_msa, obs_score, obs_start_end = global_pairwise_align_protein(
TabularMSA([Protein("HEAGAWGHEE", metadata={'id': "s1"})]),
Protein("PAWHEAE", metadata={'id': "s2"}),
gap_open_penalty=10., gap_extend_penalty=5.)
self.assertEqual(
obs_msa,
TabularMSA([Protein("HEAGAWGHEE-", metadata={'id': "s1"}),
Protein("---PAW-HEAE", metadata={'id': "s2"})]))
self.assertEqual(obs_score, 23.0)
self.assertEqual(obs_start_end, [(0, 9), (0, 6)])
# One single-sequence alignment as input and one double-sequence
# alignment as input. Score confirmed manually.
obs_msa, obs_score, obs_start_end = global_pairwise_align_protein(
TabularMSA([Protein("HEAGAWGHEE", metadata={'id': "s1"}),
Protein("HDAGAWGHDE", metadata={'id': "s2"})]),
TabularMSA([Protein("PAWHEAE", metadata={'id': "s3"})]),
gap_open_penalty=10., gap_extend_penalty=5.)
self.assertEqual(
obs_msa,
TabularMSA([Protein("HEAGAWGHEE-", metadata={'id': "s1"}),
Protein("HDAGAWGHDE-", metadata={'id': "s2"}),
Protein("---PAW-HEAE", metadata={'id': "s3"})]))
self.assertEqual(obs_score, 21.0)
self.assertEqual(obs_start_end, [(0, 9), (0, 6)])
# TypeError on invalid input
self.assertRaises(TypeError, global_pairwise_align_protein,
42, Protein("HEAGAWGHEE"))
self.assertRaises(TypeError, global_pairwise_align_protein,
Protein("HEAGAWGHEE"), 42)
def test_global_pairwise_align_protein_invalid_dtype(self):
with self.assertRaisesRegex(TypeError,
"TabularMSA with Protein dtype.*dtype "
"'DNA'"):
global_pairwise_align_protein(TabularMSA([Protein('PAW')]),
TabularMSA([DNA('ACGT')]))
def test_global_pairwise_align_protein_penalize_terminal_gaps(self):
obs_msa, obs_score, obs_start_end = global_pairwise_align_protein(
Protein("HEAGAWGHEE"), Protein("PAWHEAE"), gap_open_penalty=10.,
gap_extend_penalty=5., penalize_terminal_gaps=True)
self.assertEqual(obs_msa, TabularMSA([Protein("HEAGAWGHEE"),
Protein("---PAWHEAE")]))
self.assertEqual(obs_score, 1.0)
self.assertEqual(obs_start_end, [(0, 9), (0, 6)])
def test_global_pairwise_align_nucleotide_penalize_terminal_gaps(self):
# in these tests one sequence is about 3x the length of the other.
# we toggle penalize_terminal_gaps to confirm that it results in
# different alignments and alignment scores.
seq1 = DNA("ACCGTGGACCGTTAGGATTGGACCCAAGGTTG")
seq2 = DNA("T"*25 + "ACCGTGGACCGTAGGATTGGACCAAGGTTA" + "A"*25)
obs_msa, obs_score, obs_start_end = global_pairwise_align_nucleotide(
seq1, seq2, gap_open_penalty=5., gap_extend_penalty=0.5,
match_score=5, mismatch_score=-4, penalize_terminal_gaps=False)
self.assertEqual(
obs_msa,
TabularMSA([DNA("-------------------------ACCGTGGACCGTTAGGA"
"TTGGACCCAAGGTTG-------------------------"),
DNA("TTTTTTTTTTTTTTTTTTTTTTTTTACCGTGGACCGT-AGGA"
"TTGGACC-AAGGTTAAAAAAAAAAAAAAAAAAAAAAAAAA")]))
self.assertEqual(obs_score, 131.0)
obs_msa, obs_score, obs_start_end = global_pairwise_align_nucleotide(
seq1, seq2, gap_open_penalty=5., gap_extend_penalty=0.5,
match_score=5, mismatch_score=-4, penalize_terminal_gaps=True)
self.assertEqual(
obs_msa,
TabularMSA([DNA("-------------------------ACCGTGGACCGTTAGGA"
"TTGGACCCAAGGTT-------------------------G"),
DNA("TTTTTTTTTTTTTTTTTTTTTTTTTACCGTGGACCGT-AGGA"
"TTGGACC-AAGGTTAAAAAAAAAAAAAAAAAAAAAAAAAA")]))
self.assertEqual(obs_score, 97.0)
def test_local_pairwise_align_protein(self):
obs_msa, obs_score, obs_start_end = local_pairwise_align_protein(
Protein("HEAGAWGHEE"), Protein("PAWHEAE"), gap_open_penalty=10.,
gap_extend_penalty=5.)
self.assertEqual(obs_msa, TabularMSA([Protein("AWGHE"),
Protein("AW-HE")]))
self.assertEqual(obs_score, 26.0)
self.assertEqual(obs_start_end, [(4, 8), (1, 4)])
obs_msa, obs_score, obs_start_end = local_pairwise_align_protein(
Protein("HEAGAWGHEE"), Protein("PAWHEAE"), gap_open_penalty=5.,
gap_extend_penalty=0.5)
self.assertEqual(obs_msa, TabularMSA([Protein("AWGHE-E"),
Protein("AW-HEAE")]))
self.assertEqual(obs_score, 32.0)
self.assertEqual(obs_start_end, [(4, 9), (1, 6)])
# Protein sequences with metadata
obs_msa, obs_score, obs_start_end = local_pairwise_align_protein(
Protein("HEAGAWGHEE", metadata={'id': "s1"}),
Protein("PAWHEAE", metadata={'id': "s2"}),
gap_open_penalty=10., gap_extend_penalty=5.)
self.assertEqual(
obs_msa, TabularMSA([Protein("AWGHE", metadata={'id': "s1"}),
Protein("AW-HE", metadata={'id': "s2"})]))
self.assertEqual(obs_score, 26.0)
self.assertEqual(obs_start_end, [(4, 8), (1, 4)])
# Fails when either input is passed as a TabularMSA
self.assertRaises(TypeError, local_pairwise_align_protein,
TabularMSA([Protein("HEAGAWGHEE",
metadata={'id': "s1"})]),
Protein("PAWHEAE", metadata={'id': "s2"}),
gap_open_penalty=10.,
gap_extend_penalty=5.)
self.assertRaises(TypeError, local_pairwise_align_protein,
Protein("HEAGAWGHEE", metadata={'id': "s1"}),
TabularMSA([Protein("PAWHEAE",
metadata={'id': "s2"})]),
gap_open_penalty=10., gap_extend_penalty=5.)
# TypeError on invalid input
self.assertRaises(TypeError, local_pairwise_align_protein,
42, Protein("HEAGAWGHEE"))
self.assertRaises(TypeError, local_pairwise_align_protein,
Protein("HEAGAWGHEE"), 42)
def test_global_pairwise_align_nucleotide(self):
obs_msa, obs_score, obs_start_end = global_pairwise_align_nucleotide(
DNA("GACCTTGACCAGGTACC"), DNA("GAACTTTGACGTAAC"),
gap_open_penalty=5., gap_extend_penalty=0.5, match_score=5,
mismatch_score=-4)
self.assertEqual(obs_msa, TabularMSA([DNA("G-ACCTTGACCAGGTACC"),
DNA("GAACTTTGAC---GTAAC")]))
self.assertEqual(obs_score, 41.0)
self.assertEqual(obs_start_end, [(0, 16), (0, 14)])
obs_msa, obs_score, obs_start_end = global_pairwise_align_nucleotide(
DNA("GACCTTGACCAGGTACC"), DNA("GAACTTTGACGTAAC"),
gap_open_penalty=10., gap_extend_penalty=0.5, match_score=5,
mismatch_score=-4)
self.assertEqual(obs_msa, TabularMSA([DNA("-GACCTTGACCAGGTACC"),
DNA("GAACTTTGAC---GTAAC")]))
self.assertEqual(obs_score, 32.0)
self.assertEqual(obs_start_end, [(0, 16), (0, 14)])
# DNA sequences with metadata
obs_msa, obs_score, obs_start_end = global_pairwise_align_nucleotide(
DNA("GACCTTGACCAGGTACC", metadata={'id': "s1"}),
DNA("GAACTTTGACGTAAC", metadata={'id': "s2"}),
gap_open_penalty=10., gap_extend_penalty=0.5, match_score=5,
mismatch_score=-4)
self.assertEqual(
obs_msa,
TabularMSA([DNA("-GACCTTGACCAGGTACC", metadata={'id': "s1"}),
DNA("GAACTTTGAC---GTAAC", metadata={'id': "s2"})]))
self.assertEqual(obs_score, 32.0)
self.assertEqual(obs_start_end, [(0, 16), (0, 14)])
# Align one DNA sequence and one TabularMSA, score computed manually
obs_msa, obs_score, obs_start_end = global_pairwise_align_nucleotide(
TabularMSA([DNA("GACCTTGACCAGGTACC", metadata={'id': "s1"}),
DNA("GACCATGACCAGGTACC", metadata={'id': "s2"})]),
DNA("GAACTTTGACGTAAC", metadata={'id': "s3"}),
gap_open_penalty=10., gap_extend_penalty=0.5, match_score=5,
mismatch_score=-4)
self.assertEqual(
obs_msa,
TabularMSA([DNA("-GACCTTGACCAGGTACC", metadata={'id': "s1"}),
DNA("-GACCATGACCAGGTACC", metadata={'id': "s2"}),
DNA("GAACTTTGAC---GTAAC", metadata={'id': "s3"})]))
self.assertEqual(obs_score, 27.5)
self.assertEqual(obs_start_end, [(0, 16), (0, 14)])
# TypeError on invalid input
self.assertRaises(TypeError, global_pairwise_align_nucleotide,
42, DNA("ACGT"))
self.assertRaises(TypeError, global_pairwise_align_nucleotide,
DNA("ACGT"), 42)
def test_global_pairwise_align_nucleotide_invalid_dtype(self):
with self.assertRaisesRegex(TypeError,
"TabularMSA with DNA or RNA dtype.*dtype "
"'Protein'"):
global_pairwise_align_nucleotide(TabularMSA([DNA('ACGT')]),
TabularMSA([Protein('PAW')]))
def test_local_pairwise_align_nucleotide(self):
obs_msa, obs_score, obs_start_end = local_pairwise_align_nucleotide(
DNA("GACCTTGACCAGGTACC"), DNA("GAACTTTGACGTAAC"),
gap_open_penalty=5., gap_extend_penalty=0.5, match_score=5,
mismatch_score=-4)
self.assertEqual(obs_msa, TabularMSA([DNA("ACCTTGACCAGGTACC"),
DNA("ACTTTGAC---GTAAC")]))
self.assertEqual(obs_score, 41.0)
self.assertEqual(obs_start_end, [(1, 16), (2, 14)])
obs_msa, obs_score, obs_start_end = local_pairwise_align_nucleotide(
DNA("GACCTTGACCAGGTACC"), DNA("GAACTTTGACGTAAC"),
gap_open_penalty=10., gap_extend_penalty=5., match_score=5,
mismatch_score=-4)
self.assertEqual(obs_msa, TabularMSA([DNA("ACCTTGAC"),
DNA("ACTTTGAC")]))
self.assertEqual(obs_score, 31.0)
self.assertEqual(obs_start_end, [(1, 8), (2, 9)])
# DNA sequences with metadata
obs_msa, obs_score, obs_start_end = local_pairwise_align_nucleotide(
DNA("GACCTTGACCAGGTACC", metadata={'id': "s1"}),
DNA("GAACTTTGACGTAAC", metadata={'id': "s2"}),
gap_open_penalty=10., gap_extend_penalty=5., match_score=5,
mismatch_score=-4)
self.assertEqual(
obs_msa,
TabularMSA([DNA("ACCTTGAC", metadata={'id': "s1"}),
DNA("ACTTTGAC", metadata={'id': "s2"})]))
self.assertEqual(obs_score, 31.0)
self.assertEqual(obs_start_end, [(1, 8), (2, 9)])
# Fails when either input is passed as a TabularMSA
self.assertRaises(TypeError, local_pairwise_align_nucleotide,
TabularMSA([DNA("GACCTTGACCAGGTACC",
metadata={'id': "s1"})]),
DNA("GAACTTTGACGTAAC", metadata={'id': "s2"}),
gap_open_penalty=10., gap_extend_penalty=5.,
match_score=5, mismatch_score=-4)
self.assertRaises(TypeError, local_pairwise_align_nucleotide,
DNA("GACCTTGACCAGGTACC", metadata={'id': "s1"}),
TabularMSA([DNA("GAACTTTGACGTAAC",
metadata={'id': "s2"})]),
gap_open_penalty=10., gap_extend_penalty=5.,
match_score=5, mismatch_score=-4)
# TypeError on invalid input
self.assertRaises(TypeError, local_pairwise_align_nucleotide,
42, DNA("ACGT"))
self.assertRaises(TypeError, local_pairwise_align_nucleotide,
DNA("ACGT"), 42)
def test_nucleotide_aligners_use_substitution_matrices(self):
alt_sub = make_identity_substitution_matrix(10, -10)
# alternate substitution matrix yields different alignment (the
# aligned sequences and the scores are different) with local alignment
msa_no_sub, score_no_sub, start_end_no_sub = \
local_pairwise_align_nucleotide(
DNA("GACCTTGACCAGGTACC"), DNA("GAACTTTGACGTAAC"),
gap_open_penalty=10., gap_extend_penalty=5., match_score=5,
mismatch_score=-4)
msa_alt_sub, score_alt_sub, start_end_alt_sub = \
local_pairwise_align_nucleotide(
DNA("GACCTTGACCAGGTACC"), DNA("GAACTTTGACGTAAC"),
gap_open_penalty=10., gap_extend_penalty=5., match_score=5,
mismatch_score=-4, substitution_matrix=alt_sub)
self.assertNotEqual(msa_no_sub, msa_alt_sub)
self.assertNotEqual(score_no_sub, score_alt_sub)
self.assertNotEqual(start_end_no_sub, start_end_alt_sub)
# alternate substitution matrix yields different alignment (the
# aligned sequences and the scores are different) with global alignment
msa_no_sub, score_no_sub, start_end_no_sub = \
global_pairwise_align_nucleotide(
DNA("GACCTTGACCAGGTACC"), DNA("GAACTTTGACGTAAC"),
gap_open_penalty=10., gap_extend_penalty=5., match_score=5,
mismatch_score=-4)
msa_alt_sub, score_alt_sub, start_end_alt_sub = \
global_pairwise_align_nucleotide(
DNA("GACCTTGACCAGGTACC"), DNA("GAACTTTGACGTAAC"),
gap_open_penalty=10., gap_extend_penalty=5., match_score=5,
mismatch_score=-4, substitution_matrix=alt_sub)
self.assertNotEqual(msa_no_sub, msa_alt_sub)
self.assertNotEqual(score_no_sub, score_alt_sub)
self.assertEqual(start_end_no_sub, start_end_alt_sub)
def test_local_pairwise_align_invalid_type(self):
with self.assertRaisesRegex(TypeError,
'GrammaredSequence.*Sequence'):
local_pairwise_align(DNA('ACGT'), Sequence('ACGT'), 1.0, 1.0, {})
def test_local_pairwise_align_type_mismatch(self):
with self.assertRaisesRegex(TypeError,
"same type: 'DNA' != 'RNA'"):
local_pairwise_align(DNA('ACGT'), RNA('ACGU'), 1.0, 1.0, {})
def test_init_matrices_sw(self):
expected_score_m = np.zeros((5, 4))
expected_tback_m = [[0, 0, 0, 0],
[0, -1, -1, -1],
[0, -1, -1, -1],
[0, -1, -1, -1],
[0, -1, -1, -1]]
actual_score_m, actual_tback_m = _init_matrices_sw(
TabularMSA([DNA('AAA', metadata={'id': 'id'})]),
TabularMSA([DNA('AAAA', metadata={'id': 'id'})]), 5, 2)
np.testing.assert_array_equal(actual_score_m, expected_score_m)
np.testing.assert_array_equal(actual_tback_m, expected_tback_m)
def test_init_matrices_nw(self):
expected_score_m = [[0, -5, -7, -9],
[-5, 0, 0, 0],
[-7, 0, 0, 0],
[-9, 0, 0, 0],
[-11, 0, 0, 0]]
expected_tback_m = [[0, 3, 3, 3],
[2, -1, -1, -1],
[2, -1, -1, -1],
[2, -1, -1, -1],
[2, -1, -1, -1]]
actual_score_m, actual_tback_m = _init_matrices_nw(
TabularMSA([DNA('AAA', metadata={'id': 'id'})]),
TabularMSA([DNA('AAAA', metadata={'id': 'id'})]), 5, 2)
np.testing.assert_array_equal(actual_score_m, expected_score_m)
np.testing.assert_array_equal(actual_tback_m, expected_tback_m)
def test_compute_substitution_score(self):
# these results were computed manually
subs_m = make_identity_substitution_matrix(5, -4)
gap_chars = set('-.')
self.assertEqual(
_compute_substitution_score(['A'], ['A'], subs_m, 0, gap_chars),
5.0)
self.assertEqual(
_compute_substitution_score(['A', 'A'], ['A'], subs_m, 0,
gap_chars),
5.0)
self.assertEqual(
_compute_substitution_score(['A', 'C'], ['A'], subs_m, 0,
gap_chars),
0.5)
self.assertEqual(
_compute_substitution_score(['A', 'C'], ['A', 'C'], subs_m, 0,
gap_chars),
0.5)
self.assertEqual(
_compute_substitution_score(['A', 'A'], ['A', '-'], subs_m, 0,
gap_chars),
2.5)
self.assertEqual(
_compute_substitution_score(['A', 'A'], ['A', '-'], subs_m, 1,
gap_chars),
3)
# alt subs_m
subs_m = make_identity_substitution_matrix(1, -2)
self.assertEqual(
_compute_substitution_score(['A', 'A'], ['A', '-'], subs_m, 0,
gap_chars),
0.5)
def test_compute_score_and_traceback_matrices(self):
# these results were computed manually
expected_score_m = [[0, -5, -7, -9],
[-5, 2, -3, -5],
[-7, -3, 4, -1],
[-9, -5, -1, 6],
[-11, -7, -3, 1]]
expected_tback_m = [[0, 3, 3, 3],
[2, 1, 3, 3],
[2, 2, 1, 3],
[2, 2, 2, 1],
[2, 2, 2, 2]]
m = make_identity_substitution_matrix(2, -1)
actual_score_m, actual_tback_m = _compute_score_and_traceback_matrices(
TabularMSA([DNA('ACG', metadata={'id': 'id'})]),
TabularMSA([DNA('ACGT', metadata={'id': 'id'})]), 5, 2, m)
np.testing.assert_array_equal(actual_score_m, expected_score_m)
np.testing.assert_array_equal(actual_tback_m, expected_tback_m)
# different sequences
# these results were computed manually
expected_score_m = [[0, -5, -7, -9],
[-5, 2, -3, -5],
[-7, -3, 4, -1],
[-9, -5, -1, 3],
[-11, -7, -3, -2]]
expected_tback_m = [[0, 3, 3, 3],
[2, 1, 3, 3],
[2, 2, 1, 3],
[2, 2, 2, 1],
[2, 2, 2, 1]]
m = make_identity_substitution_matrix(2, -1)
actual_score_m, actual_tback_m = _compute_score_and_traceback_matrices(
TabularMSA([DNA('ACC', metadata={'id': 'id'})]),
TabularMSA([DNA('ACGT', metadata={'id': 'id'})]), 5, 2, m)
np.testing.assert_array_equal(actual_score_m, expected_score_m)
np.testing.assert_array_equal(actual_tback_m, expected_tback_m)
# four sequences provided in two alignments
# these results were computed manually
expected_score_m = [[0, -5, -7, -9],
[-5, 2, -3, -5],
[-7, -3, 4, -1],
[-9, -5, -1, 3],
[-11, -7, -3, -2]]
expected_tback_m = [[0, 3, 3, 3],
[2, 1, 3, 3],
[2, 2, 1, 3],
[2, 2, 2, 1],
[2, 2, 2, 1]]
m = make_identity_substitution_matrix(2, -1)
actual_score_m, actual_tback_m = _compute_score_and_traceback_matrices(
TabularMSA([DNA('ACC', metadata={'id': 's1'}),
DNA('ACC', metadata={'id': 's2'})]),
TabularMSA([DNA('ACGT', metadata={'id': 's3'}),
DNA('ACGT', metadata={'id': 's4'})]), 5, 2, m)
np.testing.assert_array_equal(actual_score_m, expected_score_m)
import numpy as np
from math import pi
def make_swatches(
start=0.5,
rotations=0,
min_sat=1.2,
max_sat=1.2,
min_light=0.0,
max_light=1.0,
gamma=1.0,
numbers=256.0,
reverse=False,
float=False,
hex=False,
) -> list:
"""Generates a list of RGB values with the cubehelix color scheme.
Cubehelix is intended to create color schemes spanning from black
to white, traversing through red, green, and blue using a tapered
helix in the colour cube with increasing perceived intensity. See
http://www.mrao.cam.ac.uk/~dag/CUBEHELIX/
Args:
start: The central hue at the middle of the scheme, ranging
from 0.0 to 3.0.
rotations: Deviation from the central hue with rotations of the
helix. Defaults to 0 being monochrome. Can be negative.
min_sat: The saturation at the start of the scheme.
max_sat: The saturation at the end of the scheme.
min_light: The lightness at the start of the scheme.
max_light: The lightness at the end of the scheme.
gamma: Emphasis of low or high intensity.
numbers: The number of colors within the scheme.
reverse: Return color list reversed.
float: Convert return values to float rather than 8-bit int.
hex: Convert non-float values to #xxxxxx format.
"""
# Clip some of the passed values into ranges that make sense.
start = np.clip(start, 0.0, 3.0)
min_sat = np.clip(min_sat, 0, 2)
max_sat = np.clip(max_sat, 0, 2)
min_light = np.clip(min_light, 0, 2)
max_light = np.clip(max_light, 0, 2)
numbers = np.clip(numbers, 1, 1024)
# Define transform scalars
fract = np.linspace(min_light, max_light, numbers)
phi = 2.0 * pi * ((start + 1) / 3.0 + rotations * fract)
fract **= gamma
satar = np.linspace(min_sat, max_sat, numbers)
amp = satar * fract * (1.0 - fract) / 2.0
# Define transform vectors/matrices
transform_matrix = np.array(
[[-0.14861, +1.78277], [-0.29227, -0.90649], [+1.97249, +0.00000]]
)
rotation_vector = np.array([np.cos(phi), np.sin(phi)])  # sin term reconstructed: standard cubehelix rotation vector
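# A hedged usage sketch for make_swatches(), assuming the full implementation
# (the RGB assembly that follows rotation_vector is not shown above). The
# parameter values are illustrative; rotations=-1.5 gives the classic
# blue -> green -> red cubehelix.
def _swatches_example():
    swatches = make_swatches(start=0.5, rotations=-1.5, numbers=16, hex=True)
    return swatches   # per the docstring: a list of 16 '#xxxxxx' strings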
'''
Created on Dec 31, 2013
@author: <NAME> (<EMAIL>)
@copyright: (c) 2013 <NAME>
@license: MIT
'''
# standard library
from __future__ import division, print_function
import logging
import os
import types
# external libraries
import numpy as np
import scipy.signal
import scipy.io.wavfile
import matplotlib.pyplot as plt
#===================================================================================================
# Classes
#===================================================================================================
class Audio(object):
def __init__(self, channels=0, fs=96000, nofsamples=0, duration=None,
initialdata=None, dtype=np.float64):
"""Base class for audio processing. Samples are stored as a numpy array.
We can create an instance by specifying a channel count and one of either
a duration or a sample count parameter. The other way of creating an
instance is by providing an already existing numpy array containing the
audio samples.
The shape of the audio samples is always (Nsamples_per_channel, Nchannels).
"""
self._logger = logging.getLogger(__name__)
# We sometimes divide by the sample rate to get time values
assert fs > 0, "sample rate cannot be zero or negative"
self.fs = fs # sample rate should always be specified in the constructor
self.nofsamples = None # number of samples per channel
self.duration = None # duration (length) in seconds
self.ch = None # number of channels
self._comment = ''
if initialdata is None:
# if we are not given any initial samples we create an empty array of
# zeros for the audio samples.
assert isinstance(channels, int)
assert not(nofsamples!=0 and duration is not None), "choose either samples or duration"
self.ch = channels
if duration is not None:
self.nofsamples = int(duration*self.fs)
self.duration = duration
else:
self.nofsamples = nofsamples
self._set_duration()
# create space for the samples
self.samples = np.zeros((self.nofsamples, self.ch), dtype=dtype)
else:
# An array of initial samples are given, use this to extract
# channel count and durations.
assert isinstance(initialdata, np.ndarray), \
'Only numpy arrays are allowed as initial data'
assert channels == 0, "parameter 'channels' is redundant if initial data is specified"
assert nofsamples == 0, "parameter 'nofsamples' is redundant if initial data is specified"
assert duration is None, "parameter 'duration' is redundant if initial data is specified"
# copy the data to avoid unexpected data corruption
self.samples = initialdata.copy()
if self.samples.ndim == 1:
# if the array is
# array([ 1., 1., 1.])
# we expand it to
# array([[ 1.],
# [ 1.],
# [ 1.]])
#
self.samples = np.expand_dims(self.samples, axis=1)
assert self.samples.ndim == 2, 'shape must be (Nsamples, Nchannels)'
self.nofsamples, self.ch = self.samples.shape
# initial data is assumed to have more samples than channels
assert self.nofsamples > self.ch, 'shape must be (Nsamples, Nchannels)'
self._set_duration()
assert self.nofsamples is not None
assert self.duration is not None
assert self.ch is not None
def __str__(self):
s = '=======================================\n'
s += 'classname : %s\n' %self.__class__.__name__
s += 'sample rate : %.1f [Hz]\n' %self.fs
s += 'channels : %i\n' %self.ch
s += 'duration : %.3f [s]\n' %self.duration
s += 'datatype : %s\n' %self.samples.dtype
s += 'samples per ch : %i\n' %self.nofsamples
s += 'data size : %.3f [Mb]\n' %(self.samples.nbytes/(1024*1024))
s += 'has comment : %s\n' %('yes' if len(self._comment)!=0 else 'no')
if self.ch != 0:
# += '-----------------:---------------------\n'
s += 'peak : %s\n' %np.array_str(self.peak()[0],
precision=4, suppress_small=True)
s += 'RMS : %s\n' %np.array_str(self.rms(),
precision=4, suppress_small=True)
s += 'crestfactor : %s\n' %np.array_str(self.crest_factor(),
precision=4, suppress_small=True)
s += '-----------------:---------------------\n'
return s
def __len__(self):
return self.nofsamples
def _set_duration(self):
"""internal method
If we have modified the samples variable (by padding with zeros
for example) we need to re-calculate the duration
"""
self.duration = self.nofsamples/self.fs
def _set_samples(self, idx=0, samples=None):
"""internal method
NOTE: idx != channel
idx is always zero indexed since it refers to the numpy array. Channels
are always indexed from one since this is the natural way of identifying
channel numbers.
"""
assert isinstance(samples, np.ndarray)
assert len(samples) == self.nofsamples
self.samples[:,idx] = samples
def pretty_string_samples(self, idx_start=0, idx_end=20, precision=4, header=False):
s = ''
if header:
t = ' '
u = 'ch'
for i in range(self.ch):
t += '-------:'
u += ' %2i :' %(i+1)
t += '\n'
u += '\n'
s += t # -------:-------:-------:
s += u # ch 1 : 2 : 3 :
s += t # -------:-------:-------:
s += np.array_str(self.samples[idx_start:idx_end,:],
max_line_width=260, # we can print 32 channels before linewrap
precision=precision,
suppress_small=True)
if (idx_end-idx_start) < self.nofsamples:
s = s[:-1] # strip the right ']' character
s += '\n ...,\n'
lastlines = np.array_str(self.samples[-3:,:],
max_line_width=260,
precision=precision,
suppress_small=True)
s += ' %s\n' %lastlines[1:] # strip first '['
return s
def pad(self, nofsamples=0):
"""Zero pad *at the end* of the current audio data.
increases duration by samples/fs
"""
assert nofsamples >= 0, "Can't append negative number of samples"
zeros = np.zeros((nofsamples, self.ch), dtype=self.samples.dtype)
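# A hedged usage sketch of the two construction paths described in the Audio
# docstring: an empty buffer from a channel count plus duration, and wrapping an
# existing numpy array. Sample rate and signal values are illustrative.
def _audio_example():
    a = Audio(channels=2, fs=48000, duration=1.5)    # zeros, samples.shape == (72000, 2)
    ramp = np.linspace(-1.0, 1.0, 48000)
    b = Audio(fs=48000, initialdata=ramp)            # mono, samples.shape == (48000, 1)
    return a.samples.shape, b.samples.shape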
from __future__ import print_function, division
import os
import numpy as np
from ..discretization import StructuredGrid
from ..datbase import DataType, DataInterface
import flopy.utils.binaryfile as bf
from flopy.utils import HeadFile
import numpy.ma as ma
import struct
import sys
# Module for exporting vtk from flopy
np_to_vtk_type = {'int8': 'Int8',
'uint8': 'UInt8',
'int16': 'Int16',
'uint16': 'UInt16',
'int32': 'Int32',
'uint32': 'UInt32',
'int64': 'Int64',
'uint64': 'UInt64',
'float32': 'Float32',
'float64': 'Float64'}
np_to_struct = {'int8': 'b',
'uint8': 'B',
'int16': 'h',
'uint16': 'H',
'int32': 'i',
'uint32': 'I',
'int64': 'q',
'uint64': 'Q',
'float32': 'f',
'float64': 'd'}
class XmlWriterInterface:
"""
Helps writing vtk files.
Parameters
----------
file_path : str
output file path
"""
def __init__(self, file_path):
# class attributes
self.open_tag = False
self.current = []
self.indent_level = 0
self.indent_char = ' '
# open file and initialize
self.f = self._open_file(file_path)
self.write_string('<?xml version="1.0"?>')
# open VTKFile element
self.open_element('VTKFile').add_attributes(version='0.1')
def _open_file(self, file_path):
"""
Open the file for writing.
Return
------
File object.
"""
raise NotImplementedError('must define _open_file in child class')
def write_string(self, string):
"""
Write a string to the file.
"""
raise NotImplementedError('must define write_string in child class')
def open_element(self, tag):
if self.open_tag:
self.write_string(">")
indent = self.indent_level * self.indent_char
self.indent_level += 1
tag_string = "\n" + indent + "<%s" % tag
self.write_string(tag_string)
self.open_tag = True
self.current.append(tag)
return self
def close_element(self, tag=None):
self.indent_level -= 1
if tag:
assert (self.current.pop() == tag)
if self.open_tag:
self.write_string(">")
self.open_tag = False
indent = self.indent_level * self.indent_char
tag_string = "\n" + indent + "</%s>" % tag
self.write_string(tag_string)
else:
self.write_string("/>")
self.open_tag = False
self.current.pop()
return self
def add_attributes(self, **kwargs):
assert self.open_tag
for key in kwargs:
st = ' %s="%s"' % (key, kwargs[key])
self.write_string(st)
return self
def write_line(self, text):
if self.open_tag:
self.write_string('>')
self.open_tag = False
self.write_string('\n')
indent = self.indent_level * self.indent_char
self.write_string(indent)
self.write_string(text)
return self
def write_array(self, array, actwcells=None, **kwargs):
"""
Write an array to the file.
Parameters
----------
array : ndarray
the data array being output
actwcells : array
array of the active cells
kwargs : dictionary
Attributes to be added to the DataArray element
"""
raise NotImplementedError('must define write_array in child class')
def final(self):
"""
Finalize the file. Must be called.
"""
self.close_element('VTKFile')
assert (not self.open_tag)
self.f.close()
class XmlWriterAscii(XmlWriterInterface):
"""
Helps writing ascii vtk files.
Parameters
----------
file_path : str
output file path
"""
def __init__(self, file_path):
super(XmlWriterAscii, self).__init__(file_path)
def _open_file(self, file_path):
"""
Open the file for writing.
Return
------
File object.
"""
return open(file_path, "w")
def write_string(self, string):
"""
Write a string to the file.
"""
self.f.write(string)
def write_array(self, array, actwcells=None, **kwargs):
"""
Write an array to the file.
Parameters
----------
array : ndarray
the data array being output
actwcells : array
array of the active cells
kwargs : dictionary
Attributes to be added to the DataArray element
"""
# open DataArray element with relevant attributes
self.open_element('DataArray')
vtk_type = np_to_vtk_type[array.dtype.name]
self.add_attributes(type=vtk_type)
self.add_attributes(**kwargs)
self.add_attributes(format='ascii')
# write the data
nlay = array.shape[0]
for lay in range(nlay):
if actwcells is not None:
idx = (actwcells[lay] != 0)
array_lay_flat = array[lay][idx].flatten()
else:
array_lay_flat = array[lay].flatten()
# replace NaN values by -1e9 as there is a bug in Paraview when
# reading NaN in ASCII mode
# https://gitlab.kitware.com/paraview/paraview/issues/19042
# this may be removed in the future if they fix the bug
array_lay_flat[np.isnan(array_lay_flat)] = -1e9
s = ' '.join(['{}'.format(val) for val in array_lay_flat])
self.write_line(s)
# close DataArray element
self.close_element('DataArray')
return
class XmlWriterBinary(XmlWriterInterface):
"""
Helps writing binary vtk files.
Parameters
----------
file_path : str
output file path
"""
def __init__(self, file_path):
super(XmlWriterBinary, self).__init__(file_path)
if sys.byteorder == "little":
self.byte_order = '<'
self.add_attributes(byte_order='LittleEndian')
else:
self.byte_order = '>'
self.add_attributes(byte_order='BigEndian')
self.add_attributes(header_type="UInt64")
# class attributes
self.offset = 0
self.byte_count_size = 8
self.processed_arrays = []
def _open_file(self, file_path):
"""
Open the file for writing.
Return
------
File object.
"""
return open(file_path, "wb")
def write_string(self, string):
"""
Write a string to the file.
"""
self.f.write(str.encode(string))
def write_array(self, array, actwcells=None, **kwargs):
"""
Write an array to file.
Parameters
----------
array : ndarray
the data array being output
actwcells : array
array of the active cells
kwargs : dictionary
Attributes to be added to the DataArray element
"""
# open DataArray element with relevant attributes
self.open_element('DataArray')
vtk_type = np_to_vtk_type[array.dtype.name]
self.add_attributes(type=vtk_type)
self.add_attributes(**kwargs)
self.add_attributes(format='appended', offset=self.offset)
# store array for later writing (appended data section)
if actwcells is not None:
array = array[actwcells != 0]
a = np.ascontiguousarray(array.ravel())
array_size = array.size * array[0].dtype.itemsize
self.processed_arrays.append([a, array_size])
# calculate the offset of the start of the next piece of data
# offset is calculated from beginning of data section
self.offset += array_size + self.byte_count_size
# close DataArray element
self.close_element('DataArray')
return
def _write_size(self, block_size):
# size is a 64 bit unsigned integer
byte_order = self.byte_order + 'Q'
block_size = struct.pack(byte_order, block_size)
self.f.write(block_size)
def _append_array_binary(self, data):
# see vtk documentation and more details here:
# https://vtk.org/Wiki/VTK_XML_Formats#Appended_Data_Section
assert (data.flags['C_CONTIGUOUS'] or data.flags['F_CONTIGUOUS'])
assert data.ndim==1
data_format = self.byte_order + str(data.size) + \
np_to_struct[data.dtype.name]
binary_data = struct.pack(data_format, *data)
self.f.write(binary_data)
def final(self):
"""
Finalize the file. Must be called.
"""
# build data section
self.open_element('AppendedData')
self.add_attributes(encoding='raw')
self.write_line('_')
for a, block_size in self.processed_arrays:
self._write_size(block_size)
self._append_array_binary(a)
self.close_element('AppendedData')
# call super final
super(XmlWriterBinary, self).final()
class _Array(object):
# class to store array and tell if array is 2d
def __init__(self, array, array2d):
self.array = array
self.array2d = array2d
def _get_basic_modeltime(perlen_list):
modeltim = 0
totim = []
for tim in perlen_list:
totim.append(modeltim)
modeltim += tim
return totim
class Vtk(object):
"""
Class to build VTK object for exporting flopy vtk
Parameters
----------
model : MFModel
flopy model instance
verbose : bool
If True, stdout is verbose
nanval : float
no data value, default is -1e20
smooth : bool
if True, will create smooth layer elevations, default is False
point_scalars : bool
if True, will also output array values at cell vertices, default is
False; note this automatically sets smooth to True
vtk_grid_type : str
Specific vtk_grid_type or 'auto' (default). Possible specific values
are 'ImageData', 'RectilinearGrid', and 'UnstructuredGrid'.
If 'auto', the grid type is automatically determined. Namely:
* A regular grid (in all three directions) will be saved as an
'ImageData'.
* A rectilinear (in all three directions), non-regular grid
will be saved as a 'RectilinearGrid'.
* Other grids will be saved as 'UnstructuredGrid'.
true2d : bool
If True, the model is expected to be 2d (1 layer, 1 row or 1 column)
and the data will be exported as true 2d data, default is False.
binary : bool
if True the output file will be binary, default is False
Attributes
----------
arrays : dict
Stores data arrays added to VTK object
"""
def __init__(self, model, verbose=None, nanval=-1e+20, smooth=False,
point_scalars=False, vtk_grid_type='auto', true2d=False,
binary=False):
if point_scalars:
smooth = True
if verbose is None:
verbose = model.verbose
self.verbose = verbose
# set up variables
self.model = model
self.modelgrid = model.modelgrid
self.nlay = self.modelgrid.nlay
if hasattr(self.model, 'dis') and hasattr(self.model.dis, 'laycbd'):
self.nlay = self.nlay + np.sum(self.model.dis.laycbd.array > 0)
self.nrow = self.modelgrid.nrow
self.ncol = self.modelgrid.ncol
self.shape = (self.nlay, self.nrow, self.ncol)
self.shape2d = (self.shape[1], self.shape[2])
self.shape_verts = (self.shape[0]+1, self.shape[1]+1, self.shape[2]+1)
self.shape_verts2d = (self.shape_verts[1], self.shape_verts[2])
self.nanval = nanval
self.arrays = {}
self.vectors = {}
self.smooth = smooth
self.point_scalars = point_scalars
self.has_cell_data = False
self.has_point_data = False
# check if structured grid, vtk only supports structured grid
assert (isinstance(self.modelgrid, StructuredGrid))
# cbd
self.cbd_on = False
# get ibound
if self.modelgrid.idomain is None:
# ibound = None
ibound = np.ones(self.shape)
else:
ibound = self.modelgrid.idomain
# build cbd ibound
if ibound is not None and hasattr(self.model, 'dis') and \
hasattr(self.model.dis, 'laycbd'):
self.cbd = np.where(self.model.dis.laycbd.array > 0)
ibound = np.insert(ibound, self.cbd[0] + 1, ibound[self.cbd[
0], :, :],
axis=0)
self.cbd_on = True
self.ibound = ibound
self.true2d = true2d
self.nx = self.modelgrid.ncol
self.ny = self.modelgrid.nrow
self.nz = self.modelgrid.nlay
if self.true2d:
if self.nz == 1:
self.nz = 0
elif self.ny == 1:
self.ny = 0
elif self.nx == 1:
self.nx = 0
else:
raise ValueError('The option true2d was used but the model is '
'not 2d.')
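# VTK cell type 8 (VTK_PIXEL, an axis-aligned quad) is used for true 2d export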
self.cell_type = 8
else:
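# VTK cell type 11 (VTK_VOXEL, an axis-aligned hexahedron) is used for 3d export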
self.cell_type = 11
self.vtk_grid_type, self.file_extension = \
self._vtk_grid_type(vtk_grid_type)
self.binary = binary
return
def _vtk_grid_type(self, vtk_grid_type='auto'):
"""
Determines the vtk grid type and corresponding file extension.
Parameters
----------
vtk_grid_type : str
Specific vtk_grid_type or 'auto'. Possible specific values are
'ImageData', 'RectilinearGrid', and 'UnstructuredGrid'.
If 'auto', the grid type is automatically determined. Namely:
* A regular grid (in all three directions) will be saved as an
'ImageData'.
* A rectilinear (in all three directions), non-regular grid
will be saved as a 'RectilinearGrid'.
* Other grids will be saved as 'UnstructuredGrid'.
Returns
----------
(vtk_grid_type, file_extension) : tuple of two strings
"""
# if 'auto', determine the vtk grid type automatically
if vtk_grid_type == 'auto':
if self.modelgrid.grid_type == 'structured':
if self.modelgrid.is_regular or \
(self.modelgrid.is_regular_xy and self.nz == 0) or \
(self.modelgrid.is_regular_xz and self.ny == 0) or \
(self.modelgrid.is_regular_yz and self.nx == 0):
vtk_grid_type = 'ImageData'
elif self.modelgrid.is_rectilinear or self.nz == 0:
vtk_grid_type = 'RectilinearGrid'
else:
vtk_grid_type = 'UnstructuredGrid'
else:
vtk_grid_type = 'UnstructuredGrid'
# otherwise, check the validity of the passed vtk_grid_type
else:
allowable_types = ['ImageData', 'RectilinearGrid',
'UnstructuredGrid']
if not any(vtk_grid_type in s for s in allowable_types):
raise ValueError('"' + vtk_grid_type + '" is not a correct '\
'vtk_grid_type.')
if (vtk_grid_type == 'ImageData' or \
vtk_grid_type == 'RectilinearGrid') and \
not self.modelgrid.grid_type == 'structured':
raise NotImplementedError('vtk_grid_type cannot be "' + \
vtk_grid_type + '" for a grid '\
'that is not structured')
if vtk_grid_type == 'ImageData' and \
not self.modelgrid.is_regular and \
not (self.modelgrid.is_regular_xy and self.nz == 0) and \
not (self.modelgrid.is_regular_xz and self.ny == 0) and \
not (self.modelgrid.is_regular_yz and self.nx == 0):
raise ValueError('vtk_grid_type cannot be "ImageData" for a '\
'non-regular grid spacing')
if vtk_grid_type == 'RectilinearGrid' and \
not self.modelgrid.is_rectilinear and not self.nz == 0:
raise ValueError('vtk_grid_type cannot be "RectilinearGrid" '\
'for a non-rectilinear grid spacing')
# determine the file extension
if vtk_grid_type == 'ImageData':
file_extension = '.vti'
elif vtk_grid_type == 'RectilinearGrid':
file_extension = '.vtr'
# else vtk_grid_type == 'UnstructuredGrid'
else:
file_extension = '.vtu'
# return vtk grid type and file extension
return (vtk_grid_type, file_extension)
def _format_array(self, a, array2d=False):
"""
Formats array for vtk output.
Parameters
----------
name : str
name of the array
a : flopy array
the array to be added to the vtk object
array2d : bool
True if the array is 2d
Return
------
Formatted array (note a copy is made)
"""
# if array is 2d reformat to 3d array
if array2d:
if a.shape == self.shape2d:
array = np.full(self.shape, self.nanval)
elif a.shape == self.shape_verts2d:
array = np.full(self.shape_verts, self.nanval)
else:
raise ValueError('Incompatible array size')
array[0, :, :] = a
a = array
# deal with inactive cells
inactive3d = self.ibound==0
if a.shape == self.shape:
# set to nan where nanval or where ibound==0
where_to_nan = np.logical_or(a==self.nanval, inactive3d)
self.has_cell_data = True
elif a.shape == self.shape_verts:
# set to nan where ibound==0 at all 8 neighbors
where_to_nan = np.full(self.shape_verts, True)
where_to_nan[:-1, :-1, :-1] = inactive3d
where_to_nan[:-1, :-1, 1:] = np.logical_and(
where_to_nan[:-1, :-1, 1:], inactive3d)
where_to_nan[:-1, 1:, :-1] = np.logical_and(
where_to_nan[:-1, 1:, :-1], inactive3d)
where_to_nan[:-1, 1:, 1:] = np.logical_and(
where_to_nan[:-1, 1:, 1:], inactive3d)
where_to_nan[1:, :-1, :-1] = np.logical_and(
where_to_nan[1:, :-1, :-1], inactive3d)
where_to_nan[1:, :-1, 1:] = np.logical_and(
where_to_nan[1:, :-1, 1:], inactive3d)
where_to_nan[1:, 1:, :-1] = np.logical_and(
where_to_nan[1:, 1:, :-1], inactive3d)
where_to_nan[1:, 1:, 1:] = np.logical_and(
where_to_nan[1:, 1:, 1:], inactive3d)
self.has_point_data = True
self.smooth = True
else:
# incompatible size, skip this array
return None
a = np.where(where_to_nan, np.nan, a)
return a
def add_array(self, name, a, array2d=False):
"""
Adds an array to the vtk object.
Parameters
----------
name : str
Name of the array.
a : flopy array
The array to be added to the vtk object.
The shape should match either grid cells or grid vertices.
array2d : bool
true if the array is 2d and represents the first layer,
default is False
"""
# format array
a = self._format_array(a, array2d)
# add to self.arrays
if a is not None:
self.arrays[name] = a
return
def add_vector(self, name, v, array2d=False):
"""
Adds a vector (i.e., a tuple of arrays) to the vtk object.
Parameters
----------
name : str
Name of the vector.
v : tuple of arrays
The vector to be added to the vtk object. The shape of each
component should match either grid cells or grid vertices.
array2d : bool
true if the vector components are 2d arrays and represent the first
layer, default is False
Notes
-----
If the grid is rotated, the vector will be rotated too, assuming that
the first and second components are along x and y directions,
respectively.
"""
# format each component of the vector
vf = ()
for vcomp in v:
vcomp = self._format_array(vcomp, array2d=array2d)
if vcomp is None:
return
vf = vf + (vcomp,)
# rotate the vector according to grid
if self.modelgrid.angrot_radians != 0.:
from ..utils import geometry
vf = list(vf)
vf[0], vf[1] = geometry.rotate(vf[0], vf[1], 0., 0.,
self.modelgrid.angrot_radians)
vf = tuple(vf)
# add to self.vectors
self.vectors[name] = vf
return
def write(self, output_file, timeval=None):
"""
Writes the stored arrays to vtk file in XML format.
Parameters
----------
output_file : str
output file name without extension (extension is determined
automatically)
timeval : scalar
model time value to be stored in the time section of the vtk
file, default is None
"""
# output file
output_file = output_file + self.file_extension
if self.verbose:
print('Writing vtk file: ' + output_file)
# initialize xml file
if self.binary:
xml = XmlWriterBinary(output_file)
else:
xml = XmlWriterAscii(output_file)
xml.add_attributes(type=self.vtk_grid_type)
# grid type
xml.open_element(self.vtk_grid_type)
# if time value write time section
if timeval:
xml.open_element('FieldData')
xml.write_array(np.array([timeval]), Name='TimeValue',
NumberOfTuples='1', RangeMin='{0}', RangeMax='{0}')
xml.close_element('FieldData')
if self.vtk_grid_type == 'UnstructuredGrid':
# get the active data cells based on the data arrays and ibound
actwcells3d = self._configure_data_arrays()
# get the verts and iverts to be output
verts, iverts, _ = \
self._get_3d_vertex_connectivity(actwcells=actwcells3d)
# check if there is data to be written out
if len(verts) == 0:
# if nothing, cannot write file
return
# get the total number of cells and vertices
ncells = len(iverts)
if self.true2d:
npoints = ncells * 4
else:
npoints = ncells * 8
if self.verbose:
print('Number of point is {}, Number of cells is {}\n'.format(
npoints, ncells))
# piece
xml.open_element('Piece')
xml.add_attributes(NumberOfPoints=npoints, NumberOfCells=ncells)
# points
xml.open_element('Points')
verts = np.array(list(verts.values()))
verts = verts.reshape(npoints, 3)
xml.write_array(verts, Name='points', NumberOfComponents='3')
xml.close_element('Points')
# cells
xml.open_element('Cells')
# connectivity
iverts = np.array(list(iverts.values()))
xml.write_array(iverts, Name='connectivity',
NumberOfComponents='1')
# offsets
offsets = np.empty((iverts.shape[0]), np.int32)
icount = 0
for index, row in enumerate(iverts):
icount += len(row)
offsets[index] = icount
xml.write_array(offsets, Name='offsets', NumberOfComponents='1')
# types
types = np.full((iverts.shape[0]), self.cell_type, dtype=np.uint8)
xml.write_array(types, Name='types', NumberOfComponents='1')
# end cells
xml.close_element('Cells')
elif self.vtk_grid_type == 'ImageData':
# note: in vtk, "extent" actually means indices of grid lines
vtk_extent_str = '0' + ' ' + str(self.nx) + ' ' + \
'0' + ' ' + str(self.ny) + ' ' + \
'0' + ' ' + str(self.nz)
xml.add_attributes(WholeExtent=vtk_extent_str)
grid_extent = self.modelgrid.xyzextent
vtk_origin_str = str(grid_extent[0]) + ' ' + \
str(grid_extent[2]) + ' ' + \
str(grid_extent[4])
xml.add_attributes(Origin=vtk_origin_str)
vtk_spacing_str = str(self.modelgrid.delr[0]) + ' ' + \
str(self.modelgrid.delc[0]) + ' ' + \
str(self.modelgrid.top[0, 0] -
self.modelgrid.botm[0, 0, 0])
xml.add_attributes(Spacing=vtk_spacing_str)
# piece
xml.open_element('Piece').add_attributes(Extent=vtk_extent_str)
elif self.vtk_grid_type == 'RectilinearGrid':
# note: in vtk, "extent" actually means indices of grid lines
vtk_extent_str = '0' + ' ' + str(self.nx) + ' ' + \
'0' + ' ' + str(self.ny) + ' ' + \
'0' + ' ' + str(self.nz)
xml.add_attributes(WholeExtent=vtk_extent_str)
# piece
xml.open_element('Piece').add_attributes(Extent=vtk_extent_str)
# grid coordinates
xml.open_element('Coordinates')
# along x
xedges = self.modelgrid.xyedges[0]
xml.write_array(xedges, Name='coord_x', NumberOfComponents='1')
# along y
yedges = np.flip(self.modelgrid.xyedges[1])
xml.write_array(yedges, Name='coord_y', NumberOfComponents='1')
# along z
zedges = np.flip(self.modelgrid.zedges)
xml.write_array(zedges, Name='coord_z', NumberOfComponents='1')
# end coordinates
xml.close_element('Coordinates')
if self.has_cell_data:
# cell data
xml.open_element('CellData')
# loop through stored arrays
for name, a in self.arrays.items():
if a.shape == self.shape_verts:
# these are dealt with later
continue
if self.vtk_grid_type == 'UnstructuredGrid':
xml.write_array(a, actwcells=actwcells3d, Name=name,
NumberOfComponents='1')
else:
# flip "a" so coordinates increase along with indices as in
# vtk
a = np.flip(a, axis=[0, 1])
xml.write_array(a, Name=name, NumberOfComponents='1')
# loop through stored vectors
for name, v in self.vectors.items():
if v[0].shape == self.shape_verts:
# these are dealt with later
continue
ncomp = len(v)
v_as_array = np.moveaxis(np.array(v), 0, -1)
if self.vtk_grid_type == 'UnstructuredGrid':
shape4d = actwcells3d.shape + (ncomp,)
actwcells4d = actwcells3d.reshape(actwcells3d.shape + (1,))
actwcells4d = np.broadcast_to(actwcells4d, shape4d)
xml.write_array(v_as_array, actwcells=actwcells4d,
Name=name, NumberOfComponents=ncomp)
else:
# flip "v" so coordinates increase along with indices as in
# vtk
v_as_array = np.flip(v_as_array, axis=[0, 1])
xml.write_array(v_as_array, Name=name,
NumberOfComponents=ncomp)
# end cell data
xml.close_element('CellData')
if self.point_scalars or self.has_point_data:
# point data (i.e., values at vertices)
xml.open_element('PointData')
# loop through stored arrays
for name, a in self.arrays.items():
if a.shape == self.shape:
if not self.point_scalars:
continue
# get the array values onto vertices
if self.vtk_grid_type == 'UnstructuredGrid':
_, _, averts = self._get_3d_vertex_connectivity(
actwcells=actwcells3d, zvalues=a)
a = np.array(list(averts.values()))
else:
a = self.modelgrid.array_at_verts(a)
a = np.flip(a, axis=[0, 1])
# deal with true2d
if self.true2d:
if self.nz == 0:
a = a[0, :, :]
elif self.ny == 0:
a = a[:, 0, :]
elif self.nx == 0:
a = a[:, :, 0]
else:
if self.vtk_grid_type == 'UnstructuredGrid':
# still need to do this to be consistent with
# connectivity (i.e. 8 points for every cell)
_, _, averts = self._get_3d_vertex_connectivity(
actwcells=actwcells3d, zvalues=a)
a = np.array(list(averts.values()))
else:
# flip "a" so coordinates increase along with indices
# as in vtk
a = np.flip(a, axis=[0, 1])
# deal with true2d
if self.true2d:
if self.nz == 0:
a = a[0, :, :]
elif self.ny == 0:
a = a[:, 0, :]
elif self.nx == 0:
a = a[:, :, 0]
xml.write_array(a, Name=name, NumberOfComponents='1')
# loop through stored vectors
for name, v in self.vectors.items():
if v[0].shape == self.shape:
if not self.point_scalars:
continue
# get the vector values onto vertices
v_verts = ()
for vcomp in v:
if self.vtk_grid_type == 'UnstructuredGrid':
_, _, averts = self._get_3d_vertex_connectivity(
actwcells=actwcells3d, zvalues=vcomp)
vcomp = np.array(list(averts.values()))
else:
vcomp = self.modelgrid.array_at_verts(vcomp)
vcomp = np.flip(vcomp, axis=[0, 1])
# deal with true2d
if self.true2d:
if self.nz == 0:
vcomp = vcomp[0, :, :]
elif self.ny == 0:
vcomp = vcomp[:, 0, :]
elif self.nx == 0:
vcomp = vcomp[:, :, 0]
v_verts = v_verts + (vcomp,)
v = v_verts
else:
v_verts = ()
for vcomp in v:
if self.vtk_grid_type == 'UnstructuredGrid':
# still need to do this to be consistent with
# connectivity (i.e. 8 points for every cell)
_, _, averts = self._get_3d_vertex_connectivity(
actwcells=actwcells3d, zvalues=vcomp)
vcomp = np.array(list(averts.values()))
else:
vcomp = | np.flip(vcomp, axis=[0, 1]) | numpy.flip |
import numpy as np
import torch
from darts.models.forecasting.forecasting_model import GlobalForecastingModel
import os
import sys
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
module_paths = [
os.path.abspath(os.getcwd()),
]
for module_path in module_paths:
print(module_path)
if module_path not in sys.path:
sys.path.append(module_path)
from models.utils import eval_all_dyn_syst, scaler, new_args_dict
import pandas as pd
from functools import partial
print = partial(print, flush=True)
from sklearn.linear_model import Ridge
from typing import Union, Sequence, Optional
from darts import TimeSeries
class esn(GlobalForecastingModel):
def delete(self):
return 0
def __init__(self, iters, cell_type, reservoir_size=1000, sparsity=0.01, radius=0.6, sigma_input=1,
dynamics_fit_ratio=2 / 7,
regularization=0.0,
scaler_tt='Standard', solver='auto', model_name='RC-CHAOS-ESN', seed=1, ensemble_base_model=False):
self.model_name = model_name
self.iters = iters
self.reservoir_size = reservoir_size
self.sparsity = sparsity
self.radius = radius
self.sigma_input = sigma_input
self.dynamics_fit_ratio = dynamics_fit_ratio
self.regularization = regularization
self.solver = solver
self.scaler_tt = scaler_tt
self.scaler = scaler(self.scaler_tt)
def getWeights(self, sizex, sizey, radius, sparsity):
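# random reservoir matrix rescaled so that its spectral radius equals `radius`;
# note that the `sparsity` argument is not applied here (the matrix stays dense)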
W = np.random.random((sizex, sizey))
eigenvalues, _ = np.linalg.eig(W)
eigenvalues = np.abs(eigenvalues)
W = (W / np.max(eigenvalues)) * radius
return W
def augmentHidden(self, h):
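# readout augmentation: square every other reservoir state (even indices),
# which breaks the odd symmetry of tanh and enriches the readout features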
h_aug = h.copy()
h_aug[::2] = pow(h_aug[::2], 2.0)
return h_aug
def getAugmentedStateSize(self):
return self.reservoir_size
def fit(self,
series: Union[TimeSeries, Sequence[TimeSeries]],
past_covariates: Optional[Union[TimeSeries, Sequence[TimeSeries]]] = None,
future_covariates: Optional[Union[TimeSeries, Sequence[TimeSeries]]] = None
) -> None:
super().fit(series)
data = np.array(series.all_values())
train_input_sequence = data.squeeze(1)
dynamics_length = int(len(data) * self.dynamics_fit_ratio)
N, input_dim = np.shape(train_input_sequence)
train_input_sequence = self.scaler.scaleData(train_input_sequence)
W_h = self.getWeights(self.reservoir_size, self.reservoir_size, self.radius, self.sparsity)
# Input weights
W_in = np.zeros((self.reservoir_size, input_dim))
q = int(self.reservoir_size / input_dim)
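# each input dimension drives its own contiguous block of q reservoir units,
# with weights drawn uniformly from [-sigma_input, sigma_input]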
for i in range(0, input_dim):
W_in[i * q:(i + 1) * q, i] = self.sigma_input * (-1 + 2 * np.random.rand(q))
# Training length
tl = N - dynamics_length
W_h = torch.tensor(W_h, requires_grad=True)
W_in = torch.tensor(W_in, requires_grad=True)
learning_rate = 0.01
a0 = learning_rate
decay = 0.95
iterations = self.iters
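# outer loop: fit a ridge readout on the collected reservoir states, then take a
# gradient step on W_in and W_h using the readout loss on a held-out split;
# the learning rate decays geometrically between iterations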
for iter in range(iterations):
h = torch.zeros((self.reservoir_size, 1), dtype=torch.float64)
# Washout phase
for t in range(dynamics_length):
i = np.reshape(train_input_sequence[t], (-1, 1))
i = torch.tensor(i)
h = torch.tanh(W_h @ h + W_in @ i)
H = []
Y = []
# Training
for t in range(tl - 1):
i = np.reshape(train_input_sequence[t + dynamics_length], (-1, 1))
i = torch.tensor(i, dtype=torch.float64)
h = torch.tanh(W_h @ h + W_in @ i)
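# differentiable equivalent of augmentHidden: multiplying h by a copy whose
# odd-indexed entries are set to 1 squares the even-indexed states only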
copy = torch.clone(h)
copy[1::2] = 1
h_aug = h * copy
H.append(h_aug[:, 0])
target = np.reshape(train_input_sequence[t + dynamics_length + 1], (-1, 1))
Y.append(target[:, 0])
split = int(tl * 0.8)
H_train = [torch.clone(x).detach().numpy() for x in H[:split]]
Y_train = Y[:split]
H_test = H[split:]
Y_test = [torch.tensor(x) for x in Y[split:]]
H = [torch.clone(x).detach().numpy() for x in H]
ridge = Ridge(alpha=self.regularization, fit_intercept=False, copy_X=True,
solver=self.solver)
if iter == iterations - 1:
ridge.fit(H, Y)
W_out = torch.tensor(ridge.coef_)
break
else:
ridge.fit(H_train, Y_train)
W_out = torch.tensor(ridge.coef_)
loss = torch.tensor([0], dtype=torch.float64)
for sample in range(len(H_test)):
loss += torch.pow(W_out @ H_test[sample] - Y_test[sample], 2)
loss /= len(H_test)
loss.backward()
with torch.no_grad():
print('Loss: ' + str(loss * 1e9))
W_in -= learning_rate * W_in.grad
W_h -= learning_rate * W_h.grad
W_in.grad.zero_()
W_h.grad.zero_()
learning_rate = decay ** iter * a0
self.W_in = W_in.detach().numpy()
self.W_h = W_h.detach().numpy()
self.W_out = W_out.detach().numpy()
self.n_trainable_parameters = np.size(self.W_out)
self.n_model_parameters = np.size(self.W_in) + | np.size(self.W_h) | numpy.size |
# coding: utf-8
# # Building your Recurrent Neural Network - Step by Step
#
# Welcome to Course 5's first assignment! In this assignment, you will implement your first Recurrent Neural Network in numpy.
#
# Recurrent Neural Networks (RNN) are very effective for Natural Language Processing and other sequence tasks because they have "memory". They can read inputs $x^{\langle t \rangle}$ (such as words) one at a time, and remember some information/context through the hidden layer activations that get passed from one time-step to the next. This allows a uni-directional RNN to take information from the past to process later inputs. A bidirectional RNN can take context from both the past and the future.
#
# **Notation**:
# - Superscript $[l]$ denotes an object associated with the $l^{th}$ layer.
# - Example: $a^{[4]}$ is the $4^{th}$ layer activation. $W^{[5]}$ and $b^{[5]}$ are the $5^{th}$ layer parameters.
#
# - Superscript $(i)$ denotes an object associated with the $i^{th}$ example.
# - Example: $x^{(i)}$ is the $i^{th}$ training example input.
#
# - Superscript $\langle t \rangle$ denotes an object at the $t^{th}$ time-step.
# - Example: $x^{\langle t \rangle}$ is the input x at the $t^{th}$ time-step. $x^{(i)\langle t \rangle}$ is the input at the $t^{th}$ timestep of example $i$.
#
# - Subscript $i$ denotes the $i^{th}$ entry of a vector.
# - Example: $a^{[l]}_i$ denotes the $i^{th}$ entry of the activations in layer $l$.
#
# We assume that you are already familiar with `numpy` and/or have completed the previous courses of the specialization. Let's get started!
# Let's first import all the packages that you will need during this assignment.
# In[3]:
import numpy as np
from rnn_utils import *
# ## 1 - Forward propagation for the basic Recurrent Neural Network
#
# Later this week, you will generate music using an RNN. The basic RNN that you will implement has the structure below. In this example, $T_x = T_y$.
# <img src="images/RNN.png" style="width:500;height:300px;">
# <caption><center> **Figure 1**: Basic RNN model </center></caption>
# Here's how you can implement an RNN:
#
# **Steps**:
# 1. Implement the calculations needed for one time-step of the RNN.
# 2. Implement a loop over $T_x$ time-steps in order to process all the inputs, one at a time.
#
# Let's go!
#
# ## 1.1 - RNN cell
#
# A Recurrent neural network can be seen as the repetition of a single cell. You are first going to implement the computations for a single time-step. The following figure describes the operations for a single time-step of an RNN cell.
#
# <img src="images/rnn_step_forward.png" style="width:700px;height:300px;">
# <caption><center> **Figure 2**: Basic RNN cell. Takes as input $x^{\langle t \rangle}$ (current input) and $a^{\langle t - 1\rangle}$ (previous hidden state containing information from the past), and outputs $a^{\langle t \rangle}$ which is given to the next RNN cell and also used to predict $y^{\langle t \rangle}$ </center></caption>
#
# **Exercise**: Implement the RNN-cell described in Figure (2).
#
# **Instructions**:
# 1. Compute the hidden state with tanh activation: $a^{\langle t \rangle} = \tanh(W_{aa} a^{\langle t-1 \rangle} + W_{ax} x^{\langle t \rangle} + b_a)$.
# 2. Using your new hidden state $a^{\langle t \rangle}$, compute the prediction $\hat{y}^{\langle t \rangle} = softmax(W_{ya} a^{\langle t \rangle} + b_y)$. We provided you a function: `softmax`.
# 3. Store $(a^{\langle t \rangle}, a^{\langle t-1 \rangle}, x^{\langle t \rangle}, parameters)$ in cache
# 4. Return $a^{\langle t \rangle}$ , $y^{\langle t \rangle}$ and cache
#
# We will vectorize over $m$ examples. Thus, $x^{\langle t \rangle}$ will have dimension $(n_x,m)$, and $a^{\langle t \rangle}$ will have dimension $(n_a,m)$.
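# `sigmoid` and `softmax` are imported from `rnn_utils` above. For reference only, minimal sketches of what these helpers are assumed to compute (an elementwise sigmoid and a column-wise, numerically stabilized softmax) are given below under hypothetical names; the graded code keeps using the imported versions.

def _sigmoid_sketch(x): return 1 / (1 + np.exp(-x))
def _softmax_sketch(x): e_x = np.exp(x - np.max(x)); return e_x / e_x.sum(axis=0)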
# In[4]:
# GRADED FUNCTION: rnn_cell_forward
def rnn_cell_forward(xt, a_prev, parameters):
"""
Implements a single forward step of the RNN-cell as described in Figure (2)
Arguments:
xt -- your input data at timestep "t", numpy array of shape (n_x, m).
a_prev -- Hidden state at timestep "t-1", numpy array of shape (n_a, m)
parameters -- python dictionary containing:
Wax -- Weight matrix multiplying the input, numpy array of shape (n_a, n_x)
Waa -- Weight matrix multiplying the hidden state, numpy array of shape (n_a, n_a)
Wya -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a)
ba -- Bias, numpy array of shape (n_a, 1)
by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1)
Returns:
a_next -- next hidden state, of shape (n_a, m)
yt_pred -- prediction at timestep "t", numpy array of shape (n_y, m)
cache -- tuple of values needed for the backward pass, contains (a_next, a_prev, xt, parameters)
"""
# Retrieve parameters from "parameters"
Wax = parameters["Wax"]
Waa = parameters["Waa"]
Wya = parameters["Wya"]
ba = parameters["ba"]
by = parameters["by"]
### START CODE HERE ### (≈2 lines)
# compute next activation state using the formula given above
a_next = np.tanh(np.dot(Wax,xt) + np.dot(Waa,a_prev) + ba)
# compute output of the current cell using the formula given above
yt_pred = softmax(np.dot(Wya,a_next) + by)
### END CODE HERE ###
# store values you need for backward propagation in cache
cache = (a_next, a_prev, xt, parameters)
return a_next, yt_pred, cache
# In[5]:
np.random.seed(1)
xt = np.random.randn(3,10)
a_prev = np.random.randn(5,10)
Waa = np.random.randn(5,5)
Wax = np.random.randn(5,3)
Wya = np.random.randn(2,5)
ba = np.random.randn(5,1)
by = np.random.randn(2,1)
parameters = {"Waa": Waa, "Wax": Wax, "Wya": Wya, "ba": ba, "by": by}
a_next, yt_pred, cache = rnn_cell_forward(xt, a_prev, parameters)
print("a_next[4] = ", a_next[4])
print("a_next.shape = ", a_next.shape)
print("yt_pred[1] =", yt_pred[1])
print("yt_pred.shape = ", yt_pred.shape)
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **a_next[4]**:
# </td>
# <td>
# [ 0.59584544 0.18141802 0.61311866 0.99808218 0.85016201 0.99980978
# -0.18887155 0.99815551 0.6531151 0.82872037]
# </td>
# </tr>
# <tr>
# <td>
# **a_next.shape**:
# </td>
# <td>
# (5, 10)
# </td>
# </tr>
# <tr>
# <td>
# **yt[1]**:
# </td>
# <td>
# [ 0.9888161 0.01682021 0.21140899 0.36817467 0.98988387 0.88945212
# 0.36920224 0.9966312 0.9982559 0.17746526]
# </td>
# </tr>
# <tr>
# <td>
# **yt.shape**:
# </td>
# <td>
# (2, 10)
# </td>
# </tr>
#
# </table>
# ## 1.2 - RNN forward pass
#
# You can see an RNN as the repetition of the cell you've just built. If your input sequence of data is carried over 10 time steps, then you will copy the RNN cell 10 times. Each cell takes as input the hidden state from the previous cell ($a^{\langle t-1 \rangle}$) and the current time-step's input data ($x^{\langle t \rangle}$). It outputs a hidden state ($a^{\langle t \rangle}$) and a prediction ($y^{\langle t \rangle}$) for this time-step.
#
#
# <img src="images/rnn.png" style="width:800px;height:300px;">
# <caption><center> **Figure 3**: Basic RNN. The input sequence $x = (x^{\langle 1 \rangle}, x^{\langle 2 \rangle}, ..., x^{\langle T_x \rangle})$ is carried over $T_x$ time steps. The network outputs $y = (y^{\langle 1 \rangle}, y^{\langle 2 \rangle}, ..., y^{\langle T_x \rangle})$. </center></caption>
#
#
#
# **Exercise**: Code the forward propagation of the RNN described in Figure (3).
#
# **Instructions**:
# 1. Create a vector of zeros ($a$) that will store all the hidden states computed by the RNN.
# 2. Initialize the "next" hidden state as $a_0$ (initial hidden state).
# 3. Start looping over each time step, your incremental index is $t$ :
# - Update the "next" hidden state and the cache by running `rnn_cell_forward`
# - Store the "next" hidden state in $a$ ($t^{th}$ position)
# - Store the prediction in y
# - Add the cache to the list of caches
# 4. Return $a$, $y$ and caches
# In[6]:
# GRADED FUNCTION: rnn_forward
def rnn_forward(x, a0, parameters):
"""
Implement the forward propagation of the recurrent neural network described in Figure (3).
Arguments:
x -- Input data for every time-step, of shape (n_x, m, T_x).
a0 -- Initial hidden state, of shape (n_a, m)
parameters -- python dictionary containing:
Waa -- Weight matrix multiplying the hidden state, numpy array of shape (n_a, n_a)
Wax -- Weight matrix multiplying the input, numpy array of shape (n_a, n_x)
Wya -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a)
ba -- Bias numpy array of shape (n_a, 1)
by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1)
Returns:
a -- Hidden states for every time-step, numpy array of shape (n_a, m, T_x)
y_pred -- Predictions for every time-step, numpy array of shape (n_y, m, T_x)
caches -- tuple of values needed for the backward pass, contains (list of caches, x)
"""
# Initialize "caches" which will contain the list of all caches
caches = []
# Retrieve dimensions from shapes of x and parameters["Wya"]
n_x, m, T_x = x.shape
n_y, n_a = parameters["Wya"].shape
### START CODE HERE ###
# initialize "a" and "y" with zeros (≈2 lines)
a = np.zeros([n_a,m,T_x])
y_pred = np.zeros([n_y,m,T_x])
# Initialize a_next (≈1 line)
a_next = a0
# loop over all time-steps
for t in range(T_x):
# Update next hidden state, compute the prediction, get the cache (≈1 line)
a_next, yt_pred, cache = rnn_cell_forward(x[:,:,t],a_next,parameters)
# Save the value of the new "next" hidden state in a (≈1 line)
a[:,:,t] = a_next
# Save the value of the prediction in y (≈1 line)
y_pred[:,:,t] = yt_pred
# Append "cache" to "caches" (≈1 line)
caches.append(cache)
### END CODE HERE ###
# store values needed for backward propagation in cache
caches = (caches, x)
return a, y_pred, caches
# In[7]:
np.random.seed(1)
x = np.random.randn(3,10,4)
a0 = np.random.randn(5,10)
Waa = np.random.randn(5,5)
Wax = np.random.randn(5,3)
Wya = np.random.randn(2,5)
ba = np.random.randn(5,1)
by = np.random.randn(2,1)
parameters = {"Waa": Waa, "Wax": Wax, "Wya": Wya, "ba": ba, "by": by}
a, y_pred, caches = rnn_forward(x, a0, parameters)
print("a[4][1] = ", a[4][1])
print("a.shape = ", a.shape)
print("y_pred[1][3] =", y_pred[1][3])
print("y_pred.shape = ", y_pred.shape)
print("caches[1][1][3] =", caches[1][1][3])
print("len(caches) = ", len(caches))
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **a[4][1]**:
# </td>
# <td>
# [-0.99999375 0.77911235 -0.99861469 -0.99833267]
# </td>
# </tr>
# <tr>
# <td>
# **a.shape**:
# </td>
# <td>
# (5, 10, 4)
# </td>
# </tr>
# <tr>
# <td>
# **y[1][3]**:
# </td>
# <td>
# [ 0.79560373 0.86224861 0.11118257 0.81515947]
# </td>
# </tr>
# <tr>
# <td>
# **y.shape**:
# </td>
# <td>
# (2, 10, 4)
# </td>
# </tr>
# <tr>
# <td>
# **cache[1][1][3]**:
# </td>
# <td>
# [-1.1425182 -0.34934272 -0.20889423 0.58662319]
# </td>
# </tr>
# <tr>
# <td>
# **len(cache)**:
# </td>
# <td>
# 2
# </td>
# </tr>
#
# </table>
# Congratulations! You've successfully built the forward propagation of a recurrent neural network from scratch. This will work well enough for some applications, but it suffers from vanishing gradient problems. So it works best when each output $y^{\langle t \rangle}$ can be estimated using mainly "local" context (meaning information from inputs $x^{\langle t' \rangle}$ where $t'$ is not too far from $t$).
#
# In the next part, you will build a more complex LSTM model, which is better at addressing vanishing gradients. The LSTM will be better able to remember a piece of information and keep it saved for many timesteps.
# ## 2 - Long Short-Term Memory (LSTM) network
#
# This following figure shows the operations of an LSTM-cell.
#
# <img src="images/LSTM.png" style="width:500;height:400px;">
# <caption><center> **Figure 4**: LSTM-cell. This tracks and updates a "cell state" or memory variable $c^{\langle t \rangle}$ at every time-step, which can be different from $a^{\langle t \rangle}$. </center></caption>
#
# Similar to the RNN example above, you will start by implementing the LSTM cell for a single time-step. Then you can iteratively call it from inside a for-loop to have it process an input with $T_x$ time-steps.
#
# ### About the gates
#
# #### - Forget gate
#
# For the sake of this illustration, let's assume we are reading words in a piece of text, and want to use an LSTM to keep track of grammatical structures, such as whether the subject is singular or plural. If the subject changes from a singular word to a plural word, we need to find a way to get rid of our previously stored memory value of the singular/plural state. In an LSTM, the forget gate lets us do this:
#
# $$\Gamma_f^{\langle t \rangle} = \sigma(W_f[a^{\langle t-1 \rangle}, x^{\langle t \rangle}] + b_f)\tag{1} $$
#
# Here, $W_f$ are weights that govern the forget gate's behavior. We concatenate $[a^{\langle t-1 \rangle}, x^{\langle t \rangle}]$ and multiply by $W_f$. The equation above results in a vector $\Gamma_f^{\langle t \rangle}$ with values between 0 and 1. This forget gate vector will be multiplied element-wise by the previous cell state $c^{\langle t-1 \rangle}$. So if one of the values of $\Gamma_f^{\langle t \rangle}$ is 0 (or close to 0) then it means that the LSTM should remove that piece of information (e.g. the singular subject) in the corresponding component of $c^{\langle t-1 \rangle}$. If one of the values is 1, then it will keep the information.
#
# #### - Update gate
#
# Once we forget that the subject being discussed is singular, we need to find a way to update it to reflect that the new subject is now plural. Here is the formula for the update gate:
#
# $$\Gamma_u^{\langle t \rangle} = \sigma(W_u[a^{\langle t-1 \rangle}, x^{\{t\}}] + b_u)\tag{2} $$
#
# Similar to the forget gate, here $\Gamma_u^{\langle t \rangle}$ is again a vector of values between 0 and 1. This will be multiplied element-wise with $\tilde{c}^{\langle t \rangle}$, in order to compute $c^{\langle t \rangle}$.
#
# #### - Updating the cell
#
# To update the new subject we need to create a new vector of numbers that we can add to our previous cell state. The equation we use is:
#
# $$ \tilde{c}^{\langle t \rangle} = \tanh(W_c[a^{\langle t-1 \rangle}, x^{\langle t \rangle}] + b_c)\tag{3} $$
#
# Finally, the new cell state is:
#
# $$ c^{\langle t \rangle} = \Gamma_f^{\langle t \rangle}* c^{\langle t-1 \rangle} + \Gamma_u^{\langle t \rangle} *\tilde{c}^{\langle t \rangle} \tag{4} $$
#
#
# #### - Output gate
#
# To decide which outputs we will use, we will use the following two formulas:
#
# $$ \Gamma_o^{\langle t \rangle}= \sigma(W_o[a^{\langle t-1 \rangle}, x^{\langle t \rangle}] + b_o)\tag{5}$$
# $$ a^{\langle t \rangle} = \Gamma_o^{\langle t \rangle}* \tanh(c^{\langle t \rangle})\tag{6} $$
#
# Where in equation 5 you decide what to output using a sigmoid function and in equation 6 you multiply that by the $\tanh$ of the new cell state $c^{\langle t \rangle}$.
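# A tiny numeric illustration (not part of the assignment): an entry of the forget gate near 0 erases the corresponding entry of the previous cell state, while an entry near 1 keeps it.

_c_prev_demo = np.array([[0.8], [-0.5]])
_gamma_f_demo = np.array([[0.02], [0.97]])  # "forget" the first entry, "keep" the second
print("forget gate demo:", (_gamma_f_demo * _c_prev_demo).ravel())  # ~[0.016, -0.485]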
# ### 2.1 - LSTM cell
#
# **Exercise**: Implement the LSTM cell described in Figure (4).
#
# **Instructions**:
# 1. Concatenate $a^{\langle t-1 \rangle}$ and $x^{\langle t \rangle}$ in a single matrix: $concat = \begin{bmatrix} a^{\langle t-1 \rangle} \\ x^{\langle t \rangle} \end{bmatrix}$
# 2. Compute all the formulas 1-6. You can use `sigmoid()` (provided) and `np.tanh()`.
# 3. Compute the prediction $y^{\langle t \rangle}$. You can use `softmax()` (provided).
# In[8]:
# GRADED FUNCTION: lstm_cell_forward
def lstm_cell_forward(xt, a_prev, c_prev, parameters):
"""
Implement a single forward step of the LSTM-cell as described in Figure (4)
Arguments:
xt -- your input data at timestep "t", numpy array of shape (n_x, m).
a_prev -- Hidden state at timestep "t-1", numpy array of shape (n_a, m)
c_prev -- Memory state at timestep "t-1", numpy array of shape (n_a, m)
parameters -- python dictionary containing:
Wf -- Weight matrix of the forget gate, numpy array of shape (n_a, n_a + n_x)
bf -- Bias of the forget gate, numpy array of shape (n_a, 1)
Wi -- Weight matrix of the update gate, numpy array of shape (n_a, n_a + n_x)
bi -- Bias of the update gate, numpy array of shape (n_a, 1)
Wc -- Weight matrix of the first "tanh", numpy array of shape (n_a, n_a + n_x)
bc -- Bias of the first "tanh", numpy array of shape (n_a, 1)
Wo -- Weight matrix of the output gate, numpy array of shape (n_a, n_a + n_x)
bo -- Bias of the output gate, numpy array of shape (n_a, 1)
Wy -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a)
by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1)
Returns:
a_next -- next hidden state, of shape (n_a, m)
c_next -- next memory state, of shape (n_a, m)
yt_pred -- prediction at timestep "t", numpy array of shape (n_y, m)
cache -- tuple of values needed for the backward pass, contains (a_next, c_next, a_prev, c_prev, xt, parameters)
Note: ft/it/ot stand for the forget/update/output gates, cct stands for the candidate value (c tilde),
c stands for the memory value
"""
# Retrieve parameters from "parameters"
Wf = parameters["Wf"]
bf = parameters["bf"]
Wi = parameters["Wi"]
bi = parameters["bi"]
Wc = parameters["Wc"]
bc = parameters["bc"]
Wo = parameters["Wo"]
bo = parameters["bo"]
Wy = parameters["Wy"]
by = parameters["by"]
# Retrieve dimensions from shapes of xt and Wy
n_x, m = xt.shape
n_y, n_a = Wy.shape
### START CODE HERE ###
# Concatenate a_prev and xt (≈3 lines)
concat = np.zeros([n_a+n_x,m])
concat[: n_a, :] = a_prev
concat[n_a :, :] = xt
# Compute values for ft, it, cct, c_next, ot, a_next using the formulas given figure (4) (≈6 lines)
ft = sigmoid(np.dot(Wf,concat) + bf)
it = sigmoid(np.dot(Wi,concat) + bi)
cct = np.tanh(np.dot(Wc,concat) + bc)
c_next = ft*c_prev + it*cct
ot = sigmoid(np.dot(Wo,concat) + bo)
a_next = ot*np.tanh(c_next)
# Compute prediction of the LSTM cell (≈1 line)
yt_pred = softmax(np.dot(Wy, a_next) + by)
### END CODE HERE ###
# store values needed for backward propagation in cache
cache = (a_next, c_next, a_prev, c_prev, ft, it, cct, ot, xt, parameters)
return a_next, c_next, yt_pred, cache
# In[9]:
np.random.seed(1)
xt = np.random.randn(3,10)
a_prev = np.random.randn(5,10)
c_prev = np.random.randn(5,10)
Wf = np.random.randn(5, 5+3)
bf = np.random.randn(5,1)
Wi = np.random.randn(5, 5+3)
bi = np.random.randn(5,1)
Wo = np.random.randn(5, 5+3)
bo = np.random.randn(5,1)
Wc = np.random.randn(5, 5+3)
bc = np.random.randn(5,1)
Wy = np.random.randn(2,5)
by = np.random.randn(2,1)
parameters = {"Wf": Wf, "Wi": Wi, "Wo": Wo, "Wc": Wc, "Wy": Wy, "bf": bf, "bi": bi, "bo": bo, "bc": bc, "by": by}
a_next, c_next, yt, cache = lstm_cell_forward(xt, a_prev, c_prev, parameters)
print("a_next[4] = ", a_next[4])
print("a_next.shape = ", c_next.shape)
print("c_next[2] = ", c_next[2])
print("c_next.shape = ", c_next.shape)
print("yt[1] =", yt[1])
print("yt.shape = ", yt.shape)
print("cache[1][3] =", cache[1][3])
print("len(cache) = ", len(cache))
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **a_next[4]**:
# </td>
# <td>
# [-0.66408471 0.0036921 0.02088357 0.22834167 -0.85575339 0.00138482
# 0.76566531 0.34631421 -0.00215674 0.43827275]
# </td>
# </tr>
# <tr>
# <td>
# **a_next.shape**:
# </td>
# <td>
# (5, 10)
# </td>
# </tr>
# <tr>
# <td>
# **c_next[2]**:
# </td>
# <td>
# [ 0.63267805 1.00570849 0.35504474 0.20690913 -1.64566718 0.11832942
# 0.76449811 -0.0981561 -0.74348425 -0.26810932]
# </td>
# </tr>
# <tr>
# <td>
# **c_next.shape**:
# </td>
# <td>
# (5, 10)
# </td>
# </tr>
# <tr>
# <td>
# **yt[1]**:
# </td>
# <td>
# [ 0.79913913 0.15986619 0.22412122 0.15606108 0.97057211 0.31146381
# 0.00943007 0.12666353 0.39380172 0.07828381]
# </td>
# </tr>
# <tr>
# <td>
# **yt.shape**:
# </td>
# <td>
# (2, 10)
# </td>
# </tr>
# <tr>
# <td>
# **cache[1][3]**:
# </td>
# <td>
# [-0.16263996 1.03729328 0.72938082 -0.54101719 0.02752074 -0.30821874
# 0.07651101 -1.03752894 1.41219977 -0.37647422]
# </td>
# </tr>
# <tr>
# <td>
# **len(cache)**:
# </td>
# <td>
# 10
# </td>
# </tr>
#
# </table>
# ### 2.2 - Forward pass for LSTM
#
# Now that you have implemented one step of an LSTM, you can iterate it over $T_x$ time-steps using a for-loop to process a sequence of inputs.
#
# <img src="images/LSTM_rnn.png" style="width:500;height:300px;">
# <caption><center> **Figure 4**: LSTM over multiple time-steps. </center></caption>
#
# **Exercise:** Implement `lstm_forward()` to run an LSTM over $T_x$ time-steps.
#
# **Note**: $c^{\langle 0 \rangle}$ is initialized with zeros.
# In[10]:
# GRADED FUNCTION: lstm_forward
def lstm_forward(x, a0, parameters):
"""
Implement the forward propagation of the recurrent neural network using an LSTM-cell described in Figure (4).
Arguments:
x -- Input data for every time-step, of shape (n_x, m, T_x).
a0 -- Initial hidden state, of shape (n_a, m)
parameters -- python dictionary containing:
Wf -- Weight matrix of the forget gate, numpy array of shape (n_a, n_a + n_x)
bf -- Bias of the forget gate, numpy array of shape (n_a, 1)
Wi -- Weight matrix of the update gate, numpy array of shape (n_a, n_a + n_x)
bi -- Bias of the update gate, numpy array of shape (n_a, 1)
Wc -- Weight matrix of the first "tanh", numpy array of shape (n_a, n_a + n_x)
bc -- Bias of the first "tanh", numpy array of shape (n_a, 1)
Wo -- Weight matrix of the output gate, numpy array of shape (n_a, n_a + n_x)
bo -- Bias of the output gate, numpy array of shape (n_a, 1)
Wy -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a)
by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1)
Returns:
a -- Hidden states for every time-step, numpy array of shape (n_a, m, T_x)
y -- Predictions for every time-step, numpy array of shape (n_y, m, T_x)
caches -- tuple of values needed for the backward pass, contains (list of all the caches, x)
"""
# Initialize "caches", which will track the list of all the caches
caches = []
### START CODE HERE ###
# Retrieve dimensions from shapes of x and parameters['Wy'] (≈2 lines)
n_x, m, T_x = x.shape
n_y, n_a = parameters['Wy'].shape
# initialize "a", "c" and "y" with zeros (≈3 lines)
a = np.zeros([n_a, m, T_x])
c = np.zeros([n_a, m, T_x])
y = np.zeros([n_y, m, T_x])
# Initialize a_next and c_next (≈2 lines)
a_next = a0
c_next = np.zeros([n_a, m])
# loop over all time-steps
for t in range(T_x):
# Update next hidden state, next memory state, compute the prediction, get the cache (≈1 line)
a_next, c_next, yt, cache = lstm_cell_forward(x[:,:,t], a_next, c_next, parameters)
# Save the value of the new "next" hidden state in a (≈1 line)
a[:,:,t] = a_next
# Save the value of the prediction in y (≈1 line)
y[:,:,t] = yt
# Save the value of the next cell state (≈1 line)
c[:,:,t] = c_next
# Append the cache into caches (≈1 line)
caches.append(cache)
### END CODE HERE ###
# store values needed for backward propagation in cache
caches = (caches, x)
return a, y, c, caches
# In[11]:
np.random.seed(1)
x = np.random.randn(3,10,7)
a0 = np.random.randn(5,10)
Wf = np.random.randn(5, 5+3)
bf = np.random.randn(5,1)
Wi = np.random.randn(5, 5+3)
bi = np.random.randn(5,1)
Wo = np.random.randn(5, 5+3)
bo = np.random.randn(5,1)
Wc = np.random.randn(5, 5+3)
bc = np.random.randn(5,1)
Wy = np.random.randn(2,5)
by = np.random.randn(2,1)
parameters = {"Wf": Wf, "Wi": Wi, "Wo": Wo, "Wc": Wc, "Wy": Wy, "bf": bf, "bi": bi, "bo": bo, "bc": bc, "by": by}
a, y, c, caches = lstm_forward(x, a0, parameters)
print("a[4][3][6] = ", a[4][3][6])
print("a.shape = ", a.shape)
print("y[1][4][3] =", y[1][4][3])
print("y.shape = ", y.shape)
print("caches[1][1[1]] =", caches[1][1][1])
print("c[1][2][1]", c[1][2][1])
print("len(caches) = ", len(caches))
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **a[4][3][6]** =
# </td>
# <td>
# 0.172117767533
# </td>
# </tr>
# <tr>
# <td>
# **a.shape** =
# </td>
# <td>
# (5, 10, 7)
# </td>
# </tr>
# <tr>
# <td>
# **y[1][4][3]** =
# </td>
# <td>
# 0.95087346185
# </td>
# </tr>
# <tr>
# <td>
# **y.shape** =
# </td>
# <td>
# (2, 10, 7)
# </td>
# </tr>
# <tr>
# <td>
# **caches[1][1][1]** =
# </td>
# <td>
# [ 0.82797464 0.23009474 0.76201118 -0.22232814 -0.20075807 0.18656139
# 0.41005165]
# </td>
#
# </tr>
# <tr>
# <td>
# **c[1][2][1]** =
# </td>
# <td>
# -0.855544916718
# </td>
# </tr>
#
# </tr>
# <tr>
# <td>
# **len(caches)** =
# </td>
# <td>
# 2
# </td>
# </tr>
#
# </table>
# Congratulations! You have now implemented the forward passes for the basic RNN and the LSTM. When using a deep learning framework, implementing the forward pass is sufficient to build systems that achieve great performance.
#
# The rest of this notebook is optional, and will not be graded.
# ## 3 - Backpropagation in recurrent neural networks (OPTIONAL / UNGRADED)
#
# In modern deep learning frameworks, you only have to implement the forward pass, and the framework takes care of the backward pass, so most deep learning engineers do not need to bother with the details of the backward pass. If however you are an expert in calculus and want to see the details of backprop in RNNs, you can work through this optional portion of the notebook.
#
# When in an earlier course you implemented a simple (fully connected) neural network, you used backpropagation to compute the derivatives with respect to the cost to update the parameters. Similarly, in recurrent neural networks you can calculate the derivatives with respect to the cost in order to update the parameters. The backprop equations are quite complicated and we did not derive them in lecture. However, we will briefly present them below.
# ### 3.1 - Basic RNN backward pass
#
# We will start by computing the backward pass for the basic RNN-cell.
#
# <img src="images/rnn_cell_backprop.png" style="width:500;height:300px;"> <br>
# <caption><center> **Figure 5**: RNN-cell's backward pass. Just like in a fully-connected neural network, the derivative of the cost function $J$ backpropagates through the RNN by following the chain-rule from calculus. The chain-rule is also used to calculate $(\frac{\partial J}{\partial W_{ax}},\frac{\partial J}{\partial W_{aa}},\frac{\partial J}{\partial b})$ to update the parameters $(W_{ax}, W_{aa}, b_a)$. </center></caption>
# #### Deriving the one step backward functions:
#
# To compute the `rnn_cell_backward` you need to compute the following equations. It is a good exercise to derive them by hand.
#
# The derivative of $\tanh$ is $1-\tanh(x)^2$. You can find the complete proof [here](https://www.wyzant.com/resources/lessons/math/calculus/derivative_proofs/tanx). Note that: $ \text{sech}(x)^2 = 1 - \tanh(x)^2$
#
# Similarly for $\frac{ \partial a^{\langle t \rangle} } {\partial W_{ax}}, \frac{ \partial a^{\langle t \rangle} } {\partial W_{aa}}, \frac{ \partial a^{\langle t \rangle} } {\partial b}$, the derivative of $\tanh(u)$ is $(1-\tanh(u)^2)du$.
#
# The final two equations also follow the same rule and are derived using the $\tanh$ derivative. Note that the arrangement is done so that the dimensions match.
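# A quick numerical sanity check (illustration only) of the identity used below, d/dx tanh(x) = 1 - tanh(x)^2:

_x0, _eps = 0.3, 1e-6
_numeric = (np.tanh(_x0 + _eps) - np.tanh(_x0 - _eps)) / (2 * _eps)
print("tanh'(0.3): numeric =", _numeric, ", analytic =", 1 - np.tanh(_x0) ** 2)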
# In[28]:
def rnn_cell_backward(da_next, cache):
"""
Implements the backward pass for the RNN-cell (single time-step).
Arguments:
da_next -- Gradient of loss with respect to next hidden state
cache -- python dictionary containing useful values (output of rnn_cell_forward())
Returns:
gradients -- python dictionary containing:
dx -- Gradients of input data, of shape (n_x, m)
da_prev -- Gradients of previous hidden state, of shape (n_a, m)
dWax -- Gradients of input-to-hidden weights, of shape (n_a, n_x)
dWaa -- Gradients of hidden-to-hidden weights, of shape (n_a, n_a)
dba -- Gradients of bias vector, of shape (n_a, 1)
"""
# Retrieve values from cache
(a_next, a_prev, xt, parameters) = cache
# Retrieve values from parameters
Wax = parameters["Wax"]
Waa = parameters["Waa"]
Wya = parameters["Wya"]
ba = parameters["ba"]
by = parameters["by"]
### START CODE HERE ###
# compute the gradient of tanh with respect to a_next (≈1 line)
dtanh = (1 - a_next**2)*da_next
# compute the gradient of the loss with respect to Wax (≈2 lines)
dxt = np.dot(Wax.T,dtanh)
dWax = np.dot(dtanh,xt.T)
# compute the gradient with respect to Waa (≈2 lines)
da_prev = np.dot(Waa.T,dtanh)
dWaa = np.dot(dtanh,a_prev.T)
# compute the gradient with respect to b (≈1 line)
dba = np.sum(dtanh, axis=1, keepdims=True)
### END CODE HERE ###
# Store the gradients in a python dictionary
gradients = {"dxt": dxt, "da_prev": da_prev, "dWax": dWax, "dWaa": dWaa, "dba": dba}
return gradients
# In[29]:
np.random.seed(1)
xt = np.random.randn(3,10)
a_prev = np.random.randn(5,10)
Wax = np.random.randn(5,3)
Waa = np.random.randn(5,5)
Wya = np.random.randn(2,5)
b = np.random.randn(5,1)
by = np.random.randn(2,1)
parameters = {"Wax": Wax, "Waa": Waa, "Wya": Wya, "ba": ba, "by": by}
a_next, yt, cache = rnn_cell_forward(xt, a_prev, parameters)
da_next = np.random.randn(5,10)
gradients = rnn_cell_backward(da_next, cache)
print("gradients[\"dxt\"][1][2] =", gradients["dxt"][1][2])
print("gradients[\"dxt\"].shape =", gradients["dxt"].shape)
print("gradients[\"da_prev\"][2][3] =", gradients["da_prev"][2][3])
print("gradients[\"da_prev\"].shape =", gradients["da_prev"].shape)
print("gradients[\"dWax\"][3][1] =", gradients["dWax"][3][1])
print("gradients[\"dWax\"].shape =", gradients["dWax"].shape)
print("gradients[\"dWaa\"][1][2] =", gradients["dWaa"][1][2])
print("gradients[\"dWaa\"].shape =", gradients["dWaa"].shape)
print("gradients[\"dba\"][4] =", gradients["dba"][4])
print("gradients[\"dba\"].shape =", gradients["dba"].shape)
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **gradients["dxt"][1][2]** =
# </td>
# <td>
# -0.460564103059
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dxt"].shape** =
# </td>
# <td>
# (3, 10)
# </td>
# </tr>
# <tr>
# <td>
# **gradients["da_prev"][2][3]** =
# </td>
# <td>
# 0.0842968653807
# </td>
# </tr>
# <tr>
# <td>
# **gradients["da_prev"].shape** =
# </td>
# <td>
# (5, 10)
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dWax"][3][1]** =
# </td>
# <td>
# 0.393081873922
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dWax"].shape** =
# </td>
# <td>
# (5, 3)
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dWaa"][1][2]** =
# </td>
# <td>
# -0.28483955787
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dWaa"].shape** =
# </td>
# <td>
# (5, 5)
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dba"][4]** =
# </td>
# <td>
# [ 0.80517166]
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dba"].shape** =
# </td>
# <td>
# (5, 1)
# </td>
# </tr>
# </table>
# #### Backward pass through the RNN
#
# Computing the gradients of the cost with respect to $a^{\langle t \rangle}$ at every time-step $t$ is useful because it is what helps the gradient backpropagate to the previous RNN-cell. To do so, you need to iterate through all the time steps starting at the end, and at each step, you increment the overall $db_a$, $dW_{aa}$, $dW_{ax}$ and you store $dx$.
#
# **Instructions**:
#
# Implement the `rnn_backward` function. Initialize the return variables with zeros first and then loop through all the time steps while calling the `rnn_cell_backward` at each timestep, updating the other variables accordingly.
# In[42]:
def rnn_backward(da, cache):
"""
Implement the backward pass for a RNN over an entire sequence of input data.
Arguments:
da -- Upstream gradients of all hidden states, of shape (n_a, m, T_x)
caches -- tuple containing information from the forward pass (rnn_forward)
Returns:
gradients -- python dictionary containing:
dx -- Gradient w.r.t. the input data, numpy-array of shape (n_x, m, T_x)
da0 -- Gradient w.r.t the initial hidden state, numpy-array of shape (n_a, m)
dWax -- Gradient w.r.t the input's weight matrix, numpy-array of shape (n_a, n_x)
dWaa -- Gradient w.r.t the hidden state's weight matrix, numpy-arrayof shape (n_a, n_a)
dba -- Gradient w.r.t the bias, of shape (n_a, 1)
"""
### START CODE HERE ###
# Retrieve values from the first cache (t=1) of caches (≈2 lines)
(caches, x) = cache
(a1, a0, x1, parameters) = caches[0]
# Retrieve dimensions from da's and x1's shapes (≈2 lines)
n_a, m, T_x = da.shape
n_x, m = x1.shape
# initialize the gradients with the right sizes (≈6 lines)
dx = np.zeros([n_x, m, T_x])
dWax = np.zeros([n_a, n_x])
dWaa = np.zeros([n_a, n_a])
dba = np.zeros([n_a, 1])
da0 = np.zeros([n_a, m])
da_prevt = np.zeros([n_a, m])
# Loop through all the time steps
for t in reversed(range(T_x)):
# Compute gradients at time step t. Choose wisely the "da_next" and the "cache" to use in the backward propagation step. (≈1 line)
gradients = rnn_cell_backward(da[:,:,t] + da_prevt, caches[t])
# Retrieve derivatives from gradients (≈ 1 line)
dxt, da_prevt, dWaxt, dWaat, dbat = gradients["dxt"], gradients["da_prev"], gradients["dWax"], gradients["dWaa"], gradients["dba"]
# Increment global derivatives w.r.t parameters by adding their derivative at time-step t (≈4 lines)
dx[:, :, t] = dxt
dWax += dWaxt
dWaa += dWaat
dba += dbat
# Set da0 to the gradient of a which has been backpropagated through all time-steps (≈1 line)
da0 = da_prevt
### END CODE HERE ###
# Store the gradients in a python dictionary
gradients = {"dx": dx, "da0": da0, "dWax": dWax, "dWaa": dWaa,"dba": dba}
return gradients
# In[43]:
np.random.seed(1)
x = np.random.randn(3,10,4)
a0 = np.random.randn(5,10)
Wax = np.random.randn(5,3)
Waa = np.random.randn(5,5)
Wya = np.random.randn(2,5)
ba = np.random.randn(5,1)
by = np.random.randn(2,1)
parameters = {"Wax": Wax, "Waa": Waa, "Wya": Wya, "ba": ba, "by": by}
a, y, caches = rnn_forward(x, a0, parameters)
da = np.random.randn(5, 10, 4)
gradients = rnn_backward(da, caches)
print("gradients[\"dx\"][1][2] =", gradients["dx"][1][2])
print("gradients[\"dx\"].shape =", gradients["dx"].shape)
print("gradients[\"da0\"][2][3] =", gradients["da0"][2][3])
print("gradients[\"da0\"].shape =", gradients["da0"].shape)
print("gradients[\"dWax\"][3][1] =", gradients["dWax"][3][1])
print("gradients[\"dWax\"].shape =", gradients["dWax"].shape)
print("gradients[\"dWaa\"][1][2] =", gradients["dWaa"][1][2])
print("gradients[\"dWaa\"].shape =", gradients["dWaa"].shape)
print("gradients[\"dba\"][4] =", gradients["dba"][4])
print("gradients[\"dba\"].shape =", gradients["dba"].shape)
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **gradients["dx"][1][2]** =
# </td>
# <td>
# [-2.07101689 -0.59255627 0.02466855 0.01483317]
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dx"].shape** =
# </td>
# <td>
# (3, 10, 4)
# </td>
# </tr>
# <tr>
# <td>
# **gradients["da0"][2][3]** =
# </td>
# <td>
# -0.314942375127
# </td>
# </tr>
# <tr>
# <td>
# **gradients["da0"].shape** =
# </td>
# <td>
# (5, 10)
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dWax"][3][1]** =
# </td>
# <td>
# 11.2641044965
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dWax"].shape** =
# </td>
# <td>
# (5, 3)
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dWaa"][1][2]** =
# </td>
# <td>
# 2.30333312658
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dWaa"].shape** =
# </td>
# <td>
# (5, 5)
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dba"][4]** =
# </td>
# <td>
# [-0.74747722]
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dba"].shape** =
# </td>
# <td>
# (5, 1)
# </td>
# </tr>
# </table>
# ## 3.2 - LSTM backward pass
# ### 3.2.1 One Step backward
#
# The LSTM backward pass is slightly more complicated than the forward one. We have provided you with all the equations for the LSTM backward pass below. (If you enjoy calculus exercises feel free to try deriving these from scratch yourself.)
#
# ### 3.2.2 gate derivatives
#
# $$d \Gamma_o^{\langle t \rangle} = da_{next}*\tanh(c_{next}) * \Gamma_o^{\langle t \rangle}*(1-\Gamma_o^{\langle t \rangle})\tag{7}$$
#
# $$d\tilde c^{\langle t \rangle} = \left(dc_{next}*\Gamma_u^{\langle t \rangle} + \Gamma_o^{\langle t \rangle} (1-\tanh(c_{next})^2) * \Gamma_u^{\langle t \rangle} * da_{next}\right) * (1-(\tilde c^{\langle t \rangle})^2) \tag{8}$$
#
# $$d\Gamma_u^{\langle t \rangle} = \left(dc_{next}*\tilde c^{\langle t \rangle} + \Gamma_o^{\langle t \rangle} (1-\tanh(c_{next})^2) * \tilde c^{\langle t \rangle} * da_{next}\right)*\Gamma_u^{\langle t \rangle}*(1-\Gamma_u^{\langle t \rangle})\tag{9}$$
#
# $$d\Gamma_f^{\langle t \rangle} = \left(dc_{next}*c_{prev} + \Gamma_o^{\langle t \rangle} (1-\tanh(c_{next})^2) * c_{prev} * da_{next}\right)*\Gamma_f^{\langle t \rangle}*(1-\Gamma_f^{\langle t \rangle})\tag{10}$$
#
# ### 3.2.3 parameter derivatives
#
# $$ dW_f = d\Gamma_f^{\langle t \rangle} * \begin{pmatrix} a_{prev} \\ x_t\end{pmatrix}^T \tag{11} $$
# $$ dW_u = d\Gamma_u^{\langle t \rangle} * \begin{pmatrix} a_{prev} \\ x_t\end{pmatrix}^T \tag{12} $$
# $$ dW_c = d\tilde c^{\langle t \rangle} * \begin{pmatrix} a_{prev} \\ x_t\end{pmatrix}^T \tag{13} $$
# $$ dW_o = d\Gamma_o^{\langle t \rangle} * \begin{pmatrix} a_{prev} \\ x_t\end{pmatrix}^T \tag{14}$$
#
# To calculate $db_f, db_u, db_c, db_o$ you just need to sum across the horizontal (axis = 1) axis on $d\Gamma_f^{\langle t \rangle}, d\Gamma_u^{\langle t \rangle}, d\tilde c^{\langle t \rangle}, d\Gamma_o^{\langle t \rangle}$ respectively. Note that you should use the `keepdims=True` option (see the small shape demo just below this cell).
#
# Finally, you will compute the derivative with respect to the previous hidden state, previous memory state, and input.
#
# $$ da_{prev} = W_f^T*d\Gamma_f^{\langle t \rangle} + W_u^T * d\Gamma_u^{\langle t \rangle}+ W_c^T * d\tilde c^{\langle t \rangle} + W_o^T * d\Gamma_o^{\langle t \rangle} \tag{15}$$
# Here, the weights for equation 15 are the first $n_a$ columns (i.e. $W_f = W_f[:,:n_a]$ etc...)
#
# $$ dc_{prev} = dc_{next}\Gamma_f^{\langle t \rangle} + \Gamma_o^{\langle t \rangle} * (1- \tanh(c_{next})^2)*\Gamma_f^{\langle t \rangle}*da_{next} \tag{16}$$
# $$ dx^{\langle t \rangle} = W_f^T*d\Gamma_f^{\langle t \rangle} + W_u^T * d\Gamma_u^{\langle t \rangle}+ W_c^T * d\tilde c_t + W_o^T * d\Gamma_o^{\langle t \rangle}\tag{17} $$
# where the weights for equation 17 are the columns from $n_a$ to the end (i.e. $W_f = W_f[:,n_a:]$ etc...)
#
# **Exercise:** Implement `lstm_cell_backward` by implementing equations $7-17$ below. Good luck! :)
# In[96]:
def lstm_cell_backward(da_next, dc_next, cache):
"""
Implement the backward pass for the LSTM-cell (single time-step).
Arguments:
da_next -- Gradients of next hidden state, of shape (n_a, m)
dc_next -- Gradients of next cell state, of shape (n_a, m)
cache -- cache storing information from the forward pass
Returns:
gradients -- python dictionary containing:
dxt -- Gradient of input data at time-step t, of shape (n_x, m)
da_prev -- Gradient w.r.t. the previous hidden state, numpy array of shape (n_a, m)
dc_prev -- Gradient w.r.t. the previous memory state, of shape (n_a, m)
dWf -- Gradient w.r.t. the weight matrix of the forget gate, numpy array of shape (n_a, n_a + n_x)
dWi -- Gradient w.r.t. the weight matrix of the update gate, numpy array of shape (n_a, n_a + n_x)
dWc -- Gradient w.r.t. the weight matrix of the memory gate, numpy array of shape (n_a, n_a + n_x)
dWo -- Gradient w.r.t. the weight matrix of the output gate, numpy array of shape (n_a, n_a + n_x)
dbf -- Gradient w.r.t. biases of the forget gate, of shape (n_a, 1)
dbi -- Gradient w.r.t. biases of the update gate, of shape (n_a, 1)
dbc -- Gradient w.r.t. biases of the memory gate, of shape (n_a, 1)
dbo -- Gradient w.r.t. biases of the output gate, of shape (n_a, 1)
"""
# Retrieve information from "cache"
(a_next, c_next, a_prev, c_prev, ft, it, cct, ot, xt, parameters) = cache
# Retrieve values from parameters
Wf = parameters['Wf']
Wo = parameters['Wo']
Wi = parameters['Wi']
Wc = parameters['Wc']
### START CODE HERE ###
# Retrieve dimensions from xt's and a_next's shape (≈2 lines)
n_x, m = xt.shape
n_a, m = a_next.shape
# Compute gates related derivatives, you can find their values can be found by looking carefully at equations (7) to (10) (≈4 lines)
dot = da_next * np.tanh(c_next) * ot * (1 - ot)
dcct = (dc_next * it + ot * (1 - np.square(np.tanh(c_next))) * it * da_next) * (1 - np.square(cct))
dit = (dc_next * cct + ot * (1 - np.square(np.tanh(c_next))) * cct * da_next) * it * (1 - it)
dft = (dc_next * c_prev + ot *(1 - np.square(np.tanh(c_next))) * c_prev * da_next) * ft * (1 - ft)
# Code equations (7) to (10) (≈4 lines)
##dit = None
##dft = None
##dot = None
##dcct = None
concat = np.concatenate((a_prev, xt))
# Compute parameters related derivatives. Use equations (11)-(14) (≈8 lines)
dWf = np.dot(dft, concat.T)
dWi = np.dot(dit, concat.T)
dWc = np.dot(dcct, concat.T)
dWo = np.dot(dot, concat.T)
dbf = np.sum(dft, axis=1 ,keepdims = True)
dbi = np.sum(dit, axis=1, keepdims = True)
dbc = np.sum(dcct, axis=1, keepdims = True)
dbo = np.sum(dot, axis=1, keepdims = True)
# Compute derivatives w.r.t previous hidden state, previous memory state and input. Use equations (15)-(17). (≈3 lines)
da_prev = np.dot(Wf[:, :n_a].T, dft) + np.dot(Wi[:, :n_a].T, dit) + np.dot(Wc[:, :n_a].T, dcct) + np.dot(Wo[:, :n_a].T, dot)
dc_prev = dc_next * ft + ot * (1 - np.square(np.tanh(c_next))) * ft * da_next
dxt = np.dot(Wf[:, n_a:].T, dft) + np.dot(Wi[:, n_a:].T, dit) + np.dot(Wc[:, n_a:].T, dcct) + np.dot(Wo[:, n_a:].T, dot)
### END CODE HERE ###
# Save gradients in dictionary
gradients = {"dxt": dxt, "da_prev": da_prev, "dc_prev": dc_prev, "dWf": dWf,"dbf": dbf, "dWi": dWi,"dbi": dbi,
"dWc": dWc,"dbc": dbc, "dWo": dWo,"dbo": dbo}
return gradients
# In[97]:
np.random.seed(1)
xt = np.random.randn(3,10)
a_prev = np.random.randn(5,10)
c_prev = np.random.randn(5,10)
Wf = np.random.randn(5, 5+3)
bf = np.random.randn(5,1)
Wi = | np.random.randn(5, 5+3) | numpy.random.randn |
# -*- coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# Also if needed: retab
'''
TEST equimap
'''
from __future__ import (unicode_literals, absolute_import, \
print_function, division)
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import time
#import warnings
if __name__ == '__main__':
#print('path 1 =', sys.path)
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
#print('path 2 =', sys.path)
# Local modules
import imas
import equimap
#import imas_west
#import pywed as pw
shot = 53221
tol_val = 1E-10
# For 2D plots
interp_points = 60
# FIRST POINT B_NORM
# ------------------
time_in = np.linspace(36, 37, 10)
Phi_in = np.linspace(0, 2*np.pi/18, 100)
R_in = np.full(Phi_in.shape, 3)
Z_in = np.zeros(R_in.shape)
# Read equilibrium data
idd = imas.ids(shot, 0)
idd.open_env('imas_public', 'west', '3')
idd.equilibrium.get()
out = idd.equilibrium
equiDict = {}
# Declaration of arrays 2d plots
equi_grid = idd.equilibrium.grids_ggd[0].grid[0]
NbrPoints = len(equi_grid.space[0].objects_per_dimension[0].object)
equiDict['r'] = np.full(NbrPoints, np.nan)
equiDict['z'] = np.full(NbrPoints, np.nan)
for ii in range(NbrPoints):
equiDict['r'][ii] = equi_grid.space[0].objects_per_dimension[0]. \
object[ii].geometry[0]
equiDict['z'][ii] = equi_grid.space[0].objects_per_dimension[0]. \
object[ii].geometry[1]
# For 2D plots
R_all = np.linspace(np.min(equiDict['r']), np.max(equiDict['r']), interp_points)
Z_all = np.linspace(np.min(equiDict['z']), np.max(equiDict['z']), interp_points)
R_all_tot = np.repeat(R_all, interp_points)
Z_all_tot = np.tile(Z_all, interp_points)
Rr = R_all_tot.reshape((interp_points, interp_points))
Zr = Z_all_tot.reshape((interp_points, interp_points))
# CALL EQUIMAP
start = time.time()
oute = equimap.get(shot, time=time_in, \
R=R_in, Phi=Phi_in, Z=Z_in, \
quantity='b_field_norm')
end = time.time()
print()
print('time in equimap.get b_norm =', end - start)
print()
print('oute.shape b_norm =', oute.shape)
# CALL EQUIMAP
start = time.time()
oute_noR = equimap.get(shot, time=time_in, \
R=R_in, Phi=Phi_in, Z=Z_in, \
quantity='b_field_norm', no_ripple=True)
end = time.time()
print()
print('time in equimap.get b_norm no Ripple =', end - start)
print()
print('oute.shape b_norm no ripple =', oute_noR.shape)
print()
print('Mean value B_norm ripple =', np.mean(oute[int(0.5*oute.shape[0]), :]))
print('Mean value B_norm NO ripple =', \
np.mean(oute_noR[int(0.5*oute_noR.shape[0]), :]))
diff_mean_val = np.mean(oute[int(0.5*oute.shape[0]), :]) \
- np.mean(oute_noR[int(0.5*oute_noR.shape[0]), :])
print('Diff mean values =', diff_mean_val)
percent_diff = np.abs(100*diff_mean_val \
/ np.mean(oute[int(0.5*oute.shape[0]), :]))
print('Percent diff mean values =', percent_diff)
# CHECK
# -----
if (np.abs(percent_diff - 0.011052598088) > tol_val):
print()
print('ERROR: Higher than tolerance percent difference ' \
+ str(np.abs(percent_diff - 0.011052598088)))
print()
#raise RuntimeError
# FOR:
# shot = 53221
# time_in = np.linspace(36, 37, 10)
# Phi_in = np.linspace(0, 2*np.pi/18, 100)
# R_in = np.full(Phi_in.shape, 3)
# Z_in = np.zeros(R_in.shape)
# RESULTS:
# Mean value B_norm ripple = 3.05593472975
# Mean value B_norm NO ripple = 3.05627248994
# Diff mean values = -0.000337760183512
# Percent diff mean values = 0.011052598088
print()
# PLOTS
plt.figure()
plt.plot(time_in, oute[:, -1], label='B_norm at R={0}, Phi=Z=0'.format(R_in[-1]))
plt.plot(time_in, oute_noR[:, -1], label='B_norm no ripple at R={0}, Phi=Z=0'.format(R_in[-1]))
plt.legend()
plt.xlabel('Time [s]')
plt.ylabel('B_norm [T]')
plt.figure()
plt.plot(Phi_in, oute[int(0.5*oute.shape[0]), :], \
label='B_norm at t={0:.2f}, R={1}, Z=0'.format( \
time_in[int(0.5*oute.shape[0])], R_in[-1]))
plt.plot(Phi_in, oute_noR[int(0.5*oute.shape[0]), :], \
label='B_norm no ripple at t={0:.2f}, R={1}, Z=0'.format( \
time_in[int(0.5*oute.shape[0])], R_in[-1]))
plt.legend()
plt.xlabel('Phi [rad]')
plt.ylabel('B_norm [T]')
# SECOND POSITION B_NORM
# ----------------------
t_ignitron = []
t_ignitron.append(32)
print()
print('t_igni =', t_ignitron[0])
print()
time_in = np.linspace(t_ignitron[0], 38, 10)
Phi_in = | np.linspace(0, 2*np.pi/18, 100) | numpy.linspace |
"""Miscellaneous functions to analyze the simulation results.
Index
-----
.. currentmodule:: nanoqm.analysis.tools
.. autosummary::
API
---
"""
import os
from typing import List, Tuple
import numpy as np
import pyparsing as pa
from scipy.optimize import curve_fit
from ..common import fs_to_cm, h2ev, hbar, r2meV
""" Functions to fit data """
def gauss_function(x: float, sigma: float) -> np.ndarray:
"""Compute a Gaussian function used for fitting data."""
return np.exp(-0.5 * (-x / sigma) ** 2)
def lorentzian_function(x_L, sigma_L, amplitude_L):
"""Compute a Lorentzian function used for fitting data."""
return amplitude_L * sigma_L**2 / (sigma_L**2 + x_L ** 2)
def exp_function(x, x0, amplitude):
"""Compute an Exponential function used for fitting data."""
return amplitude * np.exp(-x / x0)
def sine_function(t_phonon, amplitude, offset, phase, n_periods, dt):
"""Compute a sinusoidal function used for fitting data."""
t = np.arange(0, n_periods * t_phonon, dt)
# (gap) energy
y = offset + amplitude * (np.sin(2 * np.pi * t / t_phonon + phase))
y_mean = np.mean(y)
y_dummy = y / h2ev # to delete ...
return y_dummy, y, y_mean, t
def sqrt_func(x, a):
"""Compute a square root function used for fitting data."""
return a * np.sqrt(x)
def func_conv(x_real: np.ndarray, x_grid: np.ndarray, delta: float) -> np.ndarray:
"""Compute a convolution on a grid using a Gaussian function."""
return np.exp(-2 * (x_grid - x_real) ** 2 / delta ** 2)
def convolute(x: np.ndarray, y: np.ndarray, x_points: np.ndarray, sigma: float) -> np.ndarray:
"""Convolute a spectrum on a grid of x_points.
You need as input x, y and the grid where to convolute.
"""
# Compute gaussian prefactor
prefactor = np.sqrt(2.0) / (sigma * np.sqrt(np.pi))
# Convolute spectrum over grid
y_points = prefactor * np.stack([
np.sum(y * func_conv(x, x_point, sigma)) for x_point in x_points
])
return y_points
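# A minimal usage sketch (illustrative only; the arrays below are made-up
# placeholders, not data shipped with this package): broaden a stick spectrum
# onto a uniform grid with the helpers above.
#
# energies = np.array([1.0, 1.5, 2.2])       # peak positions
# intensities = np.array([0.3, 1.0, 0.5])    # peak heights
# grid = np.linspace(0.0, 3.0, 300)          # output grid
# broadened = convolute(energies, intensities, grid, sigma=0.05)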
""" Useful functions to compute autocorrelation, dephasing, etc. """
def autocorrelate(f: np.ndarray) -> Tuple[float, float]:
"""Compute the un-normalized and normalized autocorrelation of a function."""
d_f = f - f.mean()
d_f2 = np.append(d_f, d_f, axis=0)
# Compute the autocorrelation function
uacf = np.correlate(d_f, d_f2, "valid")[:d_f.size] / d_f.size
# Compute the normalized autocorrelation function
nacf = uacf / uacf[0]
return uacf, nacf
def spectral_density(f, dt):
"""Fourier Transform of a given function f using a dense grid with 100000 points.
In the case of a FFT of a normalized autocorrelation function,
this corresponds to a spectral density
"""
n_pts = 100000
f_fft = abs(1 / np.sqrt(2 * np.pi) * np.fft.rfft(f, n_pts) * dt) ** 2
# Fourier Transform of the time axis
freq = np.fft.rfftfreq(n_pts, dt)
# Conversion of the x axis (given in cycles/fs) to cm-1
freq = freq * fs_to_cm
return f_fft, freq
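# A minimal usage sketch (illustrative only; `gaps` and `dt` are placeholders
# for a time series in eV and its time step in fs, e.g. from an MD trajectory):
#
# uacf, nacf = autocorrelate(gaps)
# sd, freq_cm = spectral_density(nacf, dt)   # spectral density vs. wavenumber (cm-1)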
def dephasing(f: np.ndarray, dt: float):
"""Compute the dephasing time of a given function.
f = energies, in eV (if other function/unit: then the line_broadening will not be in eV)
dt = time step, in fs
Use the optical response formalisms:
<NAME>, Principles of Nonlinear Optical Spectroscopy, 1995
About the implementation we use the 2nd order cumulant expansion.
See also eq. (2) in : Kilina et al. Phys. Rev. Lett., 110, 180404, (2013)
To calculate the dephasing time tau we fit the dephasing function to a
gaussian of the type : exp(-0.5 * (-x / tau) ** 2)
"""
# Conversion of hbar to hartree * fs
hbar_au = hbar / h2ev
ts = np.arange(f.shape[0]) * dt
cumu_ii = np.asarray([np.trapz(f[0:i + 1], dx=(dt / hbar), axis=0) for i in range(ts.size)])
cumu_i = np.asarray([np.trapz(cumu_ii[0:i + 1], dx=(dt / hbar), axis=0)
for i in range(ts.size)])
deph = np.exp(-cumu_i)
return deph, ts
def fit_dephasing(fit_func, deph, ts, res, t_deph_guess):
"""Work in progress (?).
fit_func = 0 for Gaussian fit, or 1 for exponential fit
deph = dephasing function vs. time "
ts = time, in fs
res = factor by which to increase the time resolution of ts for the fit
t_deph_guess = initial guess, in fs
"""
np.seterr(over='ignore')
# time step and sigma-to-FWHM factor used below, derived locally so the fit
# does not rely on module-level globals
dt = ts[1] - ts[0]
std_to_fwhm = 2.0 * np.sqrt(2.0 * np.log(2.0))
if fit_func == 0:
# fit with Gaussian
popt, pcov = curve_fit(gauss_function, ts, deph, p0=(t_deph_guess, deph[0])) # [0]
ts_fit = np.arange(res * deph.shape[0]) * dt / res
deph_fit = popt[1] * np.exp(-0.5 * (-ts_fit / popt[0]) ** 2)
deph_time = popt[0] # in fs (defined as standard deviation of a Gaussian)
e_fwhm = std_to_fwhm * hbar / deph_time # FWHM in eV
perr = np.sqrt(np.diag(pcov))
deph_time_err = perr[0] # error (standard deviation) for the deph. time
e_fwhm_err = deph_time_err * std_to_fwhm * hbar / (deph_time ** 2) # error (standard deviation) for the FWHM
elif fit_func == 1:
# fit with exponential
popt, pcov = curve_fit(exp_function, ts, deph, p0=(t_deph_guess, deph[0])) # [0]
ts_fit = np.arange(res * deph.shape[0]) * dt / res
deph_fit = popt[1] * np.exp(-ts_fit / popt[0])
deph_time = popt[0] # in fs (defined as the exp. time constant)
e_fwhm = 2 * hbar / deph_time # FWHM in eV
perr = np.sqrt( | np.diag(pcov) | numpy.diag |
datadir = '/home/suriya/_/tf/datasets/qa/bAbI/tasks/en-10k/'
WHITELIST = 'abcdefghijklmnopqrstuvwxyz, '
import os
import nltk
import itertools
import numpy as np
import pickle
from random import shuffle
def get_files():
# get list of files
filenames = [ fname for fname in os.listdir(datadir) ]
# sort them
filenames = sorted(filenames,
key = lambda x : int( x[2 : x.find('_')] ))
# add path to filename
return [ datadir + fname for fname in filenames ]
def read_task(task_id):
# get file names
filenames = get_files()
# get task specific files
tfilenames = filenames[(task_id-1)*2 : (task_id-1)*2 + 2]
# read from files
content = ''
for filename in tfilenames:
with open(filename) as f:
content += f.read().lower()
return content.split('\n')[:-1]
def read_all_tasks():
# get file names
filenames = get_files()
# read from files
content = ''
for filename in filenames:
with open(filename) as f:
content += f.read().lower()
return content.split('\n')[:-1]
def reshape_content(lines):
stories = []
story_lines = []
questions = []
answers = []
for line in lines:
if line and '?' in line:
# parse answer
try:
answer = line[line.find('?')+1:].split('\t')[1]
except:
print(line)
# parse question
question = ' '.join(line.split(' ')[1:])
question = question[ : question.find('?') + 1 ]
# add items to lists
stories.append(story_lines)
questions.append(filter_word(question))
answers.append(filter_word(answer))
# empty story_lines for the next story
story_lines = []
else:
if line:
# add lines of each story(facts) to temp list
story_lines.append( filter_word(' '.join(line.split(' ')[1:]) ) )
return stories, questions, answers
def index_(tokenized_sentences):
# get frequency distribution
freq_dist = nltk.FreqDist(itertools.chain(tokenized_sentences))
# get vocabulary of 'vocab_size' most used words
vocab = freq_dist.most_common()
# index2word
index2word = ['_'] + ['?'] + ['.'] + [ x[0] for x in vocab ] # - : zero paddding
# word2index
word2index = dict([(w,i) for i,w in enumerate(index2word)] )
return index2word, word2index, freq_dist
def tokenize(lines):
tokenized = []
for line in lines:
words = []
for word in line.split(' '):
words.append( ''.join(
[ ch for ch in word if ch in WHITELIST ] ))
tokenized.append(words)
return tokenized
def filter_word(word):
return ''.join([ ch for ch in word if ch in WHITELIST ])
def zero_pad(stories, questions, answers, w2i):
num_lines = len(answers)
# answers
idx_a = np.array([ w2i[ans] for ans in answers ], dtype=np.int32)
# questions
max_q_sent_len = max([ len(q.split(' ')) for q in questions ])
idx_q = np.zeros([num_lines, max_q_sent_len + 1], dtype=np.int32)
# iterate through questions
for i,q in enumerate(questions):
for j,w in enumerate(q.split(' ')):
idx_q[i][j] = w2i[w.replace('?', '')]
# add ? to question
idx_q[i][j+1] = w2i['?']
# stories
max_s_sent_len = max([ len(line.split(' '))
for story in stories for line in story ])
max_s_lines = max([ len(story) for story in stories ])
idx_s = np.zeros([num_lines, max_s_lines, max_s_sent_len + 1],
dtype=np.int32)
for i,story in enumerate(stories):
for j,line in enumerate(story):
for k,w in enumerate(line.split(' ')):
idx_s[i][j][k] = w2i[w.replace('.', '')]
idx_s[i][j][k+1] = w2i['.']
return idx_a, idx_q, idx_s
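# Illustrative example of what zero_pad produces for a toy vocabulary
# (the dictionary and sentences below are made up for demonstration only):
#
# w2i = {'_': 0, '?': 1, '.': 2, 'mary': 3, 'went': 4, 'where': 5, 'is': 6, 'kitchen': 7}
# idx_a, idx_q, idx_s = zero_pad([['mary went kitchen']], ['where is mary'], ['kitchen'], w2i)
# idx_q -> [[5, 6, 3, 1]]     (question words followed by '?')
# idx_s -> [[[3, 4, 7, 2]]]   (story words followed by '.')
# idx_a -> [7]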
def process_data():
lines = read_all_tasks()
stories, questions, answers = reshape_content(lines)
# shuffle data
data = list(zip(stories, questions, answers))
shuffle(data)
stories, questions, answers = zip(*data)
# index data
tokenized = [ w for story in stories
for line in story for w in line.split(' ') ]
tokenized += [ w for q in questions for w in q.split(' ') ]
tokenized += [ w for w in answers]
# get loopups
idx2w, w2idx, freq_dist = index_(tokenized)
# zero padding
idx_a, idx_q, idx_s = zero_pad(stories, questions, answers, w2idx)
print('shapes')
print(idx_a.shape, idx_q.shape, idx_s.shape)
# save them
np.save('idx_q.npy', idx_q)
np.save('idx_s.npy', idx_s)
np.save('idx_a.npy', idx_a)
# let us now save the necessary dictionaries
metadata = {
'w2idx' : w2idx,
'idx2w' : idx2w,
'freq_dist' : freq_dist
}
# write to disk : data control dictionaries
with open('metadata.pkl', 'wb') as f:
pickle.dump(metadata, f)
if __name__ == '__main__':
process_data()
def load_data(PATH=''):
# read data control dictionaries
with open(PATH + 'metadata.pkl', 'rb') as f:
metadata = pickle.load(f)
# read numpy arrays
idx_s = | np.load(PATH + 'idx_s.npy') | numpy.load |
#----------------------------------------------------------------
# NAME || AM || e-mail
# <NAME> || 432 || <EMAIL>
# <NAME> || 431 || <EMAIL>
#----------------------------------------------------------------
# Course: Optimization
# Project 1
# Written in Python 3.8.6
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from numpy.linalg import inv, cholesky, norm
from numpy.linalg.linalg import LinAlgError
import time
import sys
# Globals
# Our data for the first 30 days. Training set.
df_covid_data = pd.read_csv("./Data/covid_data_30_GR.dat", delim_whitespace=True, names = ["Day", "Cases"])
# Scaling step
df_covid_data["Scaled Day"] = df_covid_data["Day"].div(10)
df_covid_data["Scaled Cases"] = df_covid_data["Cases"].div(10000)
# Our data for days 13 - 17 of November based on https://covid19.who.int/region/euro/country/gr
df_covid_data_testset = pd.DataFrame(data = np.array([[31, 66637], [32, 69675], [33, 72510], [34, 74205], [35, 76403]]),
columns = ["Day", "Cases"])
# Scaling step
df_covid_data_testset["Scaled Day"] = df_covid_data_testset["Day"].div(10)
df_covid_data_testset["Scaled Cases"] = df_covid_data_testset["Cases"].div(10000)
# Defining the polynomial model, its gradient and its Hessian matrix
def model(a, x):
return a[0] + a[1] * x + a[2] * np.power(x, 2) + a[3] * | np.power(x, 3) | numpy.power |
import numpy as np
import matplotlib.pyplot as plt
# be careful with deep and shallow copies
class Quat(object):
def __init__(self, *args, **kwargs):
self.quatCoef = np.zeros(4, dtype=float)
# construt with Bunge euler angles (radians, ZXZ)
if len(args) == 3:
ph1 = args[0]
phi = args[1]
ph2 = args[2]
self.quatCoef[0] = np.cos(phi / 2.0) * np.cos((ph1 + ph2) / 2.0)
self.quatCoef[1] = -np.sin(phi / 2.0) * np.cos((ph1 - ph2) / 2.0)
self.quatCoef[2] = -np.sin(phi / 2.0) * np.sin((ph1 - ph2) / 2.0)
self.quatCoef[3] = -np.cos(phi / 2.0) * np.sin((ph1 + ph2) / 2.0)
# construt with array of quat coefficients
elif len(args) == 1:
self.quatCoef = args[0]
# construt with quat coefficients
elif len(args) == 4:
self.quatCoef[0] = args[0]
self.quatCoef[1] = args[1]
self.quatCoef[2] = args[2]
self.quatCoef[3] = args[3]
if (self.quatCoef[0] < 0):
self.quatCoef = self.quatCoef * -1
# overload static method with instance method of same name in object
self.plotIPF = self._plotIPF
@classmethod
def fromAxisAngle(cls, axis, angle):
"""Create a quat object from an axis angle pair
Args:
axis (np.array size 3): Axis of rotation
angle (float): Rotation arround axis (radians)
Returns:
Quat: Initialised Quat object
"""
# normalise the axis vector
axis = axis / np.sqrt(np.dot(axis, axis))
# calculate quat coefficients
quatCoef = np.zeros(4, dtype=float)
quatCoef[0] = np.cos(angle / 2)
quatCoef[1:4] = np.sin(angle / 2) * axis
# call constructor
return cls(quatCoef)
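# Illustrative example (not part of the original class): a 90 degree rotation
# about the z-axis built with fromAxisAngle and applied with transformVector.
# With this convention the x unit vector should map to approximately [0, 1, 0].
#
# q = Quat.fromAxisAngle(np.array([0.0, 0.0, 1.0]), np.pi / 2)
# q.transformVector(np.array([1.0, 0.0, 0.0]))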
def eulerAngles(self):
# See Melcher, <NAME>, <NAME>, <NAME>, B. Conversion of EBSD data by a
# quaternion based algorithm to be used for grain structure simulations
# or
# Rowenhorst, D et al. Consistent representations of and conversions between 3D rotations
# P = +1
eulers = np.empty(3, dtype=float)
q = self.quatCoef
q03 = q[0]**2 + q[3]**2
q12 = q[1]**2 + q[2]**2
chi = np.sqrt(q03 * q12)
if (chi == 0 and q12 == 0):
eulers[0] = np.arctan2(-2 * q[0] * q[3],
q[0]**2 - q[3]**2)
eulers[1] = 0
eulers[2] = 0
elif (chi == 0 and q03 == 0):
eulers[0] = np.arctan2(2 * q[1] * q[2],
q[1]**2 - q[2]**2)
eulers[1] = np.pi
eulers[2] = 0
else:
cosPh1 = (-q[0] * q[1] - q[2] * q[3]) / chi
sinPh1 = (-q[0] * q[2] + q[1] * q[3]) / chi
cosPhi = q[0]**2 + q[3]**2 - q[1]**2 - q[2]**2
sinPhi = 2 * chi
cosPh2 = (-q[0] * q[1] + q[2] * q[3]) / chi
sinPh2 = (q[1] * q[3] + q[0] * q[2]) / chi
eulers[0] = np.arctan2(sinPh1, cosPh1)
eulers[1] = np.arctan2(sinPhi, cosPhi)
eulers[2] = np.arctan2(sinPh2, cosPh2)
if eulers[0] < 0:
eulers[0] += 2 * np.pi
if eulers[2] < 0:
eulers[2] += 2 * np.pi
return eulers
def rotMatrix(self):
rotMatrix = np.empty((3, 3), dtype=float)
q = self.quatCoef
qbar = q[0]**2 - q[1]**2 - q[2]**2 - q[3]**2
rotMatrix[0, 0] = qbar + 2 * q[1]**2
rotMatrix[0, 1] = 2 * (q[1] * q[2] - q[0] * q[3])
rotMatrix[0, 2] = 2 * (q[1] * q[3] + q[0] * q[2])
rotMatrix[1, 0] = 2 * (q[1] * q[2] + q[0] * q[3])
rotMatrix[1, 1] = qbar + 2 * q[2]**2
rotMatrix[1, 2] = 2 * (q[2] * q[3] - q[0] * q[1])
rotMatrix[2, 0] = 2 * (q[1] * q[3] - q[0] * q[2])
rotMatrix[2, 1] = 2 * (q[2] * q[3] + q[0] * q[1])
rotMatrix[2, 2] = qbar + 2 * q[3]**2
return rotMatrix
# show components when the quat is printed
def __repr__(self):
return "[%.4f, %.4f, %.4f, %.4f]" % (self.quatCoef[0], self.quatCoef[1], self.quatCoef[2], self.quatCoef[3])
def __str__(self):
return "[%.4f, %.4f, %.4f, %.4f]" % (self.quatCoef[0], self.quatCoef[1], self.quatCoef[2], self.quatCoef[3])
def _plotIPF(self, direction, symGroup, **kwargs):
Quat.plotIPF([self], direction, symGroup, **kwargs)
# overload * operator for quaterion product and vector product
def __mul__(self, right):
if isinstance(right, type(self)): # another quat
newQuatCoef = np.zeros(4, dtype=float)
newQuatCoef[0] = (self.quatCoef[0] * right.quatCoef[0] -
np.dot(self.quatCoef[1:4], right.quatCoef[1:4]))
newQuatCoef[1:4] = (self.quatCoef[0] * right.quatCoef[1:4] +
right.quatCoef[0] * self.quatCoef[1:4] +
np.cross(self.quatCoef[1:4], right.quatCoef[1:4]))
return Quat(newQuatCoef)
raise TypeError()
# # overload % operator for dot product
# def __mod__(self, right):
def dot(self, right):
if isinstance(right, type(self)):
return np.dot(self.quatCoef, right.quatCoef)
raise TypeError()
# overload + operator
def __add__(self, right):
if isinstance(right, type(self)):
return Quat(self.quatCoef + right.quatCoef)
raise TypeError()
# overload += operator
def __iadd__(self, right):
if isinstance(right, type(self)):
self.quatCoef += right.quatCoef
return self
raise TypeError()
# allow array like setting/getting of components
def __getitem__(self, key):
return self.quatCoef[key]
def __setitem__(self, key, value):
self.quatCoef[key] = value
return
def norm(self):
return np.sqrt(np.dot(self.quatCoef[0:4], self.quatCoef[0:4]))
def normalise(self):
self.quatCoef /= self.norm()
return
# also the inverse if this is a unit quaternion
@property
def conjugate(self):
return Quat(self.quatCoef[0], -self.quatCoef[1], -self.quatCoef[2], -self.quatCoef[3])
def transformVector(self, vector):
"""Transforms vector by the quaternion. For EBSD quaterions this
is a transformation from sample space to crystal space. Perform
on conjugate of quaternion for crystal to sample.
Args:
vector (numpy.ndarray): Vector to transform
Returns:
numpy.ndarray: Transformed vector
"""
if isinstance(vector, np.ndarray) and vector.shape == (3,):
vectorQuat = Quat(0, vector[0], vector[1], vector[2])
vectorQuatTransformed = (self * vectorQuat) * self.conjugate
vectorTransformed = vectorQuatTransformed.quatCoef[1:4]
return vectorTransformed
raise TypeError("Vector must be a size 3 numpy array.")
def misOri(self, right, symGroup, returnQuat=0):
"""Calculate misorientation angle between 2 orientations taking
into account the symmetries of the crystal structure.
Angle is 2*arccos(output).
Args:
right (quat): Orientation to find misorientation to
symGroup (str): Crystal type (cubic, hexagonal)
returnQuat (int): What to return
Returns:
various: returnQuat = 0 - misorientation
returnQuat = 1 - symmetric equivalent with min misorientation
returnQuat = 2 - both
"""
if isinstance(right, type(self)):
minMisOri = 0   # actually looking for max of this as it is cos of misorientation angle
for sym in Quat.symEqv(symGroup):  # loop over symmetrically equivalent orientations
quatSym = sym * right
currentMisOri = abs(self.dot(quatSym))
if currentMisOri > minMisOri: # keep if misorientation lower
minMisOri = currentMisOri
minQuatSym = quatSym
if returnQuat == 1:
return minQuatSym
elif returnQuat == 2:
return minMisOri, minQuatSym
else:
return minMisOri
raise TypeError("Input must be a quaternion.")
def misOriAxis(self, right):
"""Calculate misorientation axis between 2 orientations.
This does not consider symmetries of the crystal structure.
Args:
right (quat): Orientation to find misorientation axis to
Returns:
numpy.ndarray: axis of misorientation
"""
if isinstance(right, type(self)):
Dq = right * self.conjugate
Dq = Dq.quatCoef
misOriAxis = (2 * Dq[1:4] * np.arccos(Dq[0])) / np.sqrt(1 - np.power(Dq[0], 2))
return misOriAxis
raise TypeError("Input must be a quaternion.")
# Static methods
@staticmethod
def createManyQuats(eulerArray):
"""Create a an array of quats from an array of Euler angles
Args:
eulerArray (array): Size 3 x n x ... x m
"""
ph1 = eulerArray[0]
phi = eulerArray[1]
ph2 = eulerArray[2]
oriShape = eulerArray.shape[1:]
quatComps = np.zeros((4,) + oriShape, dtype=float)
quatComps[0] = np.cos(phi / 2.0) * np.cos((ph1 + ph2) / 2.0)
quatComps[1] = -np.sin(phi / 2.0) * np.cos((ph1 - ph2) / 2.0)
quatComps[2] = -np.sin(phi / 2.0) * np.sin((ph1 - ph2) / 2.0)
quatComps[3] = -np.cos(phi / 2.0) * np.sin((ph1 + ph2) / 2.0)
quats = np.empty(oriShape, dtype=Quat)
for idx in np.ndindex(oriShape):
quats[idx] = Quat(quatComps[(slice(None),) + idx])
# quatComps[(slice(None),) + idx] is equivalent to quatComps[:, idx[0], ..., idx[n]]
return quats
@staticmethod
def calcSymEqvs(quats, symGroup):
syms = Quat.symEqv(symGroup)
quatComps = np.empty((len(syms), 4, len(quats)))
# store quat components in array
for i, quat in enumerate(quats):
quatComps[0, :, i] = quat.quatCoef
# calculate symmetrical equivalents
for i, sym in enumerate(syms[1:], start=1):
# sym[i] * quat for all points (* is quaternion product)
quatComps[i, 0, :] = (quatComps[0, 0, :] * sym[0] - quatComps[0, 1, :] * sym[1] -
quatComps[0, 2, :] * sym[2] - quatComps[0, 3, :] * sym[3])
quatComps[i, 1, :] = (quatComps[0, 0, :] * sym[1] + quatComps[0, 1, :] * sym[0] -
quatComps[0, 2, :] * sym[3] + quatComps[0, 3, :] * sym[2])
quatComps[i, 2, :] = (quatComps[0, 0, :] * sym[2] + quatComps[0, 2, :] * sym[0] -
quatComps[0, 3, :] * sym[1] + quatComps[0, 1, :] * sym[3])
quatComps[i, 3, :] = (quatComps[0, 0, :] * sym[3] + quatComps[0, 3, :] * sym[0] -
quatComps[0, 1, :] * sym[2] + quatComps[0, 2, :] * sym[1])
# swap into positve hemisphere if required
quatComps[i, :, quatComps[i, 0, :] < 0] = -quatComps[i, :, quatComps[i, 0, :] < 0]
return quatComps
@staticmethod
def calcAverageOri(quatComps):
avOri = np.copy(quatComps[0, :, 0])
currMisOris = np.empty(quatComps.shape[0])
for i in range(1, quatComps.shape[2]):
# calculate misorientation between current average and all symmetrical equivalents
# Dot product of each symm quat in quatComps with refOri for point i
currMisOris[:] = abs(np.einsum("ij,j->i", quatComps[:, :, i], avOri))
# find min misorientation with current average then add to it
maxIdx = np.argmax(currMisOris[:])
avOri += quatComps[maxIdx, :, i]
# Convert components back to a quat and normalise
avOri = Quat(avOri)
avOri.normalise()
return avOri
@staticmethod
def calcMisOri(quatComps, refOri):
misOris = np.empty((quatComps.shape[0], quatComps.shape[2]))
# Dot product of each quat in quatComps with refOri
misOris[:, :] = abs(np.einsum("ijk,j->ik", quatComps, refOri.quatCoef))
maxIdxs0 = np.argmax(misOris, axis=0)
maxIdxs1 = np.arange(misOris.shape[1])
minMisOris = misOris[maxIdxs0, maxIdxs1]
minQuatComps = quatComps[maxIdxs0, :, maxIdxs1].transpose()
minMisOris[minMisOris > 1] = 1
return minMisOris, minQuatComps
@staticmethod
def polarAngles(x, y, z):
mod = np.sqrt(x**2 + y**2 + z**2)
x = x / mod
y = y / mod
z = z / mod
# alpha - angle with z axis
alpha = np.arccos(z)
# beta - angle around z axis
beta = np.arctan2(y, x)
return alpha, beta
@staticmethod
def stereoProject(*args):
if len(args) == 3:
alpha, beta = Quat.polarAngles(args[0], args[1], args[2])
elif len(args) == 2:
alpha, beta = args
else:
raise Exception("3 arguments for pole directions and 2 for polar angles.")
alphaComp = np.tan(alpha / 2)
xp = alphaComp * np.cos(beta)
yp = alphaComp * np.sin(beta)
return xp, yp
@staticmethod
def plotLine(startPoint, endPoint, plotSymmetries=False, symGroup=None, res=100, projection=None, ax=None, **kwargs):
if projection is None:
projection = Quat.stereoProject
if ax is None:
ax = plt.gca()
lines = []
lines.append((startPoint, endPoint))
if plotSymmetries:
if symGroup is None:
raise Exception("Please provide a symGroup")
for symm in Quat.symEqv(symGroup)[1:]:
startPointSymm = symm.transformVector(startPoint).astype(int)
endPointSymm = symm.transformVector(endPoint).astype(int)
if startPointSymm[2] < 0:
startPointSymm *= -1
if endPointSymm[2] < 0:
endPointSymm *= -1
lines.append((startPointSymm, endPointSymm))
linePoints = np.zeros((3, res), dtype=float)
for line in lines:
for i in range(3):
if line[0][i] == line[1][i]:
linePoints[i] = np.full(res, line[0][i])
else:
linePoints[i] = np.linspace(line[0][i], line[1][i], res)
xp, yp = projection(linePoints[0], linePoints[1], linePoints[2])
ax.plot(xp, yp, **kwargs)
@staticmethod
def labelPoint(point, label, projection=None, ax=None, padX=0, padY=0, **kwargs):
if projection is None:
projection = Quat.stereoProject
if ax is None:
ax = plt.gca()
xp, yp = projection(point[0], point[1], point[2])
ax.text(xp + padX, yp + padY, label, **kwargs)
@staticmethod
def plotPoleAxis(plotType, symGroup, ax=None):
if ax is None:
ax = plt.gca()
if plotType == "IPF" and symGroup == "cubic":
# line between [001] and [111]
Quat.plotLine(np.array([0, 0, 1]), np.array([1, 1, 1]), ax=ax, c='k', lw=2)
# line between [001] and [101]
Quat.plotLine(np.array([0, 0, 1]), np.array([1, 0, 1]), ax=ax, c='k', lw=2)
# line between [101] and [111]
Quat.plotLine(np.array([1, 0, 1]), np.array([1, 1, 1]), ax=ax, c='k', lw=2)
# label poles
Quat.labelPoint(np.array([0, 0, 1]), '001', ax=ax, padY=-0.005, va='top', ha='center')
Quat.labelPoint(np.array([1, 0, 1]), '101', ax=ax, padY=-0.005, va='top', ha='center')
Quat.labelPoint(np.array([1, 1, 1]), '111', ax=ax, padY=0.005, va='bottom', ha='center')
ax.axis('equal')
ax.axis('off')
else:
print("Only works for cubic")
@staticmethod
def plotIPF(quats, direction, symGroup, ax=None, **kwargs):
plotParams = {'marker': '+', 'c': 'r'}
plotParams.update(kwargs)
if ax is None:
ax = plt.gca()
if symGroup == "hexagonal":
raise Exception("Have fun with that")
# Plot IPF axis
# plt.figure()
Quat.plotPoleAxis("IPF", symGroup, ax=ax)
# get array of symmetry operations. shape - (numSym, 4, numQuats)
quatCompsSym = Quat.calcSymEqvs(quats, symGroup)
# array to store crytal directions for all orientations and symmetries
directionCrystal = np.empty((3, quatCompsSym.shape[0], quatCompsSym.shape[2]))
# temp variables to use below
quatDotVec = (quatCompsSym[:, 1, :] * direction[0] +
quatCompsSym[:, 2, :] * direction[1] +
quatCompsSym[:, 3, :] * direction[2])
temp = (np.square(quatCompsSym[:, 0, :]) - np.square(quatCompsSym[:, 1, :]) -
np.square(quatCompsSym[:, 2, :]) - np.square(quatCompsSym[:, 3, :]))
# transform the pole direction to crystal coords for all orientations and symmetries
# (quatCompsSym * vectorQuat) * quatCompsSym.conjugate
directionCrystal[0, :, :] = (2 * quatDotVec * quatCompsSym[:, 1, :] +
temp * direction[0] +
2 * quatCompsSym[:, 0, :] * (quatCompsSym[:, 2, :] * direction[2] -
quatCompsSym[:, 3, :] * direction[1]))
directionCrystal[1, :, :] = (2 * quatDotVec * quatCompsSym[:, 2, :] +
temp * direction[1] +
2 * quatCompsSym[:, 0, :] * (quatCompsSym[:, 3, :] * direction[0] -
quatCompsSym[:, 1, :] * direction[2]))
directionCrystal[2, :, :] = (2 * quatDotVec * quatCompsSym[:, 3, :] +
temp * direction[2] +
2 * quatCompsSym[:, 0, :] * (quatCompsSym[:, 1, :] * direction[1] -
quatCompsSym[:, 2, :] * direction[0]))
# normalise vectors
directionCrystal /= np.sqrt(np.einsum('ijk,ijk->jk', directionCrystal, directionCrystal))
# move all vectors into north hemisphere
directionCrystal[:, directionCrystal[2, :, :] < 0] *= -1
# convert to spherical coordinates
alpha, beta = Quat.polarAngles(directionCrystal[0], directionCrystal[1], directionCrystal[2])
# find the poles in the fundamental triangle
if symGroup == "cubic":
# first beta should be between 0 and 45 deg leaving 3 symmetric equivalents per orientation
trialPoles = np.logical_and(beta >= 0, beta <= np.pi / 4)
# if less than 3 left need to expand search slighly to catch edge cases
if np.sum(np.sum(trialPoles, axis=0) < 3) > 0:
deltaBeta = 1e-8
trialPoles = np.logical_and(beta >= -deltaBeta, beta <= np.pi / 4 + deltaBeta)
# create array to store angles of poles in the fundamental triangle
alphaFund, betaFund = np.empty((quatCompsSym.shape[2])), np.empty((quatCompsSym.shape[2]))
# now, of the symmetric equivalents left, we want the one with minimum alpha
# loop over different orientations
for i in range(trialPoles.shape[1]):
# create array of indexes of poles kept in previous step
trialPoleIdxs = np.arange(trialPoles.shape[0])[trialPoles[:, i]]
# find pole with minimum alpha of those kept in previous step
# then use trialPoleIdxs to get its index in original arrays
poleIdx = trialPoleIdxs[np.argmin(alpha[trialPoles[:, i], i])]
# add to final array of poles
alphaFund[i] = alpha[poleIdx, i]
betaFund[i] = beta[poleIdx, i]
else:
print("Only works for cubic")
# project onto equatorial plane
xp, yp = Quat.stereoProject(alphaFund, betaFund)
# plot poles
ax.scatter(xp, yp, **plotParams)
@staticmethod
def symEqv(group):
overRoot2 = np.sqrt(2) / 2
sqrt3over2 = np.sqrt(3) / 2
qsym = []
# identity - this should always be returned as the first symmetry
qsym.append(Quat(np.array([1.0, 0.0, 0.0, 0.0])))
# from <NAME>'s fspl_orir.f90 code
# checked for consistency with mtex
# cubic tetrads(100)
qsym.append(Quat(np.array([overRoot2, overRoot2, 0.0, 0.0])))
qsym.append(Quat(np.array([0.0, 1.0, 0.0, 0.0])))
qsym.append(Quat( | np.array([overRoot2, -overRoot2, 0.0, 0.0]) | numpy.array |
"""
Derived from: https://github.com/kratzert/finetune_alexnet_with_tensorflow/
"""
import numpy as np
import cv2
class BatchPreprocessor(object):
def __init__(self, dataset_file_path, num_classes, output_size=[227, 227], horizontal_flip=False, shuffle=False,
mean_color=[132.2766, 139.6506, 146.9702], multi_scale=None):
self.num_classes = num_classes
self.output_size = output_size
self.horizontal_flip = horizontal_flip
self.shuffle = shuffle
self.mean_color = mean_color
self.multi_scale = multi_scale
self.pointer = 0
self.images = []
self.labels = []
# Read the dataset file
dataset_file = open(dataset_file_path)
lines = dataset_file.readlines()
for line in lines:
items = line.split()
self.images.append(items[0])
self.labels.append(int(items[1]))
# Shuffle the data
if self.shuffle:
self.shuffle_data()
def shuffle_data(self):
images = self.images[:]
labels = self.labels[:]
self.images = []
self.labels = []
idx = np.random.permutation(len(labels))
for i in idx:
self.images.append(images[i])
self.labels.append(labels[i])
def reset_pointer(self):
self.pointer = 0
if self.shuffle:
self.shuffle_data()
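# Illustrative usage sketch (the file name and class count below are made-up
# placeholders, not values shipped with this code):
#
# pre = BatchPreprocessor('train_list.txt', num_classes=31, shuffle=True)
# batch = pre.next_batch(128)   # data for the next 128 entries; see next_batch for the exact return
# pre.reset_pointer()           # rewind (and reshuffle) at the end of an epoch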
def next_batch(self, batch_size):
# Get next batch of image (path) and labels
paths = self.images[self.pointer:(self.pointer+batch_size)]
labels = self.labels[self.pointer:(self.pointer+batch_size)]
# Update pointer
self.pointer += batch_size
# Read images
images = np.ndarray([batch_size, self.output_size[0], self.output_size[1], 3])
for i in range(len(paths)):
img = cv2.imread(paths[i])
# Flip image at random if flag is selected
if self.horizontal_flip and np.random.random() < 0.5:
img = cv2.flip(img, 1)
if self.multi_scale is None:
# Resize the image for output
img = cv2.resize(img, (self.output_size[0], self.output_size[0]))
img = img.astype(np.float32)
elif isinstance(self.multi_scale, list):
# Resize to random scale
new_size = np.random.randint(self.multi_scale[0], self.multi_scale[1], 1)[0]
img = cv2.resize(img, (new_size, new_size))
img = img.astype(np.float32)
# random crop at output size
diff_size = new_size - self.output_size[0]
random_offset_x = np.random.randint(0, diff_size, 1)[0]
random_offset_y = | np.random.randint(0, diff_size, 1) | numpy.random.randint |
from __future__ import division
from Metodo import Metodo
from bokeh.plotting import figure, output_file, show
from bokeh.embed import components
import numpy as np
from numpy import sin, cos, tan, sqrt, e, pi, log, \
cosh, sinh, tanh, arccos, arcsin, abs, arctan
| np.seterr(divide="ignore", invalid="ignore") | numpy.seterr |
import argparse
from planet_wind_constants import *
from scipy.special import wofz
import time
from scipy.optimize import newton
from scipy.interpolate import interp1d
from scipy.integrate import quad
from scipy.interpolate import RegularGridInterpolator
import planet_wind_utils_v6 as pw
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import deepdish as dd # pip install deepdish
mpl.use('Agg')
#from scipy.optimize import root
def I(mu, ld1, ld2):
return np.where(mu == 0.0, 0.0, (1. - ld1 * (1. - mu) - ld2 * (1. - mu)**2))
def Voigt(x, alpha, gamma):
sigma = alpha / np.sqrt(2.0*np.log(2.0))
return np.real(wofz((x + 1j*gamma)/sigma/np.sqrt(2.0)))/sigma/np.sqrt(2.0*np.pi)
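# Illustrative check (not part of the original script): the Voigt profile above
# is area-normalized, reducing to a Gaussian for gamma -> 0 and to a Lorentzian
# for alpha -> 0. The numbers below are arbitrary.
#
# x = np.linspace(-5e9, 5e9, 2001)          # frequency offsets
# profile = Voigt(x, alpha=1e9, gamma=1e8)
# np.trapz(profile, x)                      # should be close to 1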
def New_get_interp_function(d, var):
dph = np.gradient(d['x3v'])[0]
x3v = np.append(d['x3v'][0]-dph, d['x3v'])
x3v = np.append(x3v, x3v[-1]+dph)
var_data = np.append([var[-1]], var, axis=0)
var_data = np.append(var_data, [var_data[0]], axis=0)
var_interp = RegularGridInterpolator(
(x3v, d['x2v'], d['x1v']), var_data, bounds_error=True)
return var_interp
def initial_guess(floor_val = 1.e-30):
# initial guess for electron number density
ne_init = 0.1*d['rho']*Xuni/c.mp
#print("ne_init:",np.sum(ne_init))
# initial guess for number density of H I
nh1 = np.copy(d['rho']*Xuni/c.mp - ne_init)
nhe1 = np.copy(d['rho']*Yuni/(4.0*c.mp))
nhe3 = np.ones_like(nhe1)*floor_val
#print("nh1:",np.sum(nh1))
#print("nhe1:",np.sum(nhe1))
#print("nhe3:",np.sum(nhe3))
# initial guess for optical depth in each cell
tau_integral = np.clip(nh1*sigma_photo_nu0*d['gx1v'],floor_val,100)
tau1_integral = np.clip(nhe1*sigma_photo_nu1*d['gx1v'],floor_val,100)
tau3_integral = np.clip(nhe3*sigma_photo_nu3*d['gx1v'],floor_val,100)
#print ("tau:", np.sum(tau_integral) )
#print ("tau1:", np.sum(tau1_integral) )
#print ("tau3:", np.sum(tau3_integral) )
ne = apply_lim( phi*np.exp(-tau_integral)/(2.0*alpha) * (np.sqrt(1.0 + apply_lim(4.0*d['rho']*Xuni*alpha/(c.mp*phi*np.exp(-tau_integral)), floor_val)) - 1.0), floor_val) # electrons from hydrogen ionization
nh1= apply_lim(d['rho']*Xuni/c.mp - ne, floor_val) # neutral hydrogen
return tau_integral, tau1_integral, tau3_integral, ne, nh1
def new_guess(tau_integral, tau1_integral, tau3_integral, ne, nh1,
floor_val = 1.e-30):
# H number desnity
nh_plus = apply_lim( phi*np.exp(-1.0*tau_integral)*d['rho']*Xuni/c.mp / (phi*np.exp(-1.0*tau_integral) + ne*alpha), floor_val ) # ionized hydrogen
print('diff nh1 (med, av):',np.median(d['rho']*Xuni/c.mp - nh_plus - nh1), np.average(d['rho']*Xuni/c.mp - nh_plus - nh1))
nh1 = apply_lim(d['rho']*Xuni/c.mp - nh_plus, floor_val) # neutral hydrogen
# helium number densities
f3 = apply_lim((ne*alpha1 - (ne*alpha3)*(ne*alpha1 + phi1*np.exp(-1.0*tau1_integral) + ne*q13a)/(ne*alpha3 - ne*q13a)) / (ne*alpha1 - A31 - ne*q31a - ne*q31b - nh1*Q31 -
(ne*alpha1 + phi1*np.exp(-1.0*tau1_integral) + ne*q13a)*(ne*alpha3 + A31 + phi3*np.exp(-1.0*tau3_integral) + ne*q31a + ne*q31b + nh1*Q31)/(ne*alpha3 - ne*q13a)),
floor_val)
f1 = apply_lim((ne*alpha3 - f3*(ne*alpha3 + A31 + phi3*np.exp(-tau3_integral) + ne*q31a + ne*q31b + nh1*Q31)) / (ne*alpha3 - ne*q13a), floor_val)
nhe1 = apply_lim(f1*d['rho']*Yuni/(4.0*c.mp), floor_val) # ground-state helium
nhe3 = apply_lim(f3*d['rho']*Yuni/(4.0*c.mp), floor_val) # metastable-state helium
# ionized helium
nhe_plus = apply_lim((1.0 - f1 - f3)*d['rho']*Yuni/(4.0*c.mp), floor_val)
# optical depth
tau_integral = np.clip(nh1*sigma_photo_nu0*d['gx1v'],floor_val,100)
tau1_integral = np.clip(nhe1*sigma_photo_nu1*d['gx1v'],floor_val,100)
tau3_integral = np.clip(nhe3*sigma_photo_nu3*d['gx1v'],floor_val,100)
ne = np.copy(nh_plus + nhe_plus)
return ne, nh1, nh_plus, nhe1, nhe3, nhe_plus, tau_integral, tau1_integral, tau3_integral
def generate_random(N_mc):
theta = 2*np.pi*np.random.random_sample(N_mc)
r = np.sqrt(np.random.random_sample(N_mc))
yrandom = r*np.cos(theta)
zrandom = r*np.sin(theta)
return yrandom, zrandom
def generate_random_weighted(N_mc,yp,zp,rad_planet_frac):
yrandom = []
zrandom = []
rad = []
while len(yrandom)<N_mc:
theta = np.random.uniform(0,2*np.pi)
r = (1+np.sqrt(yp**2 + zp**2))*(np.random.uniform(0,1))**1.5
if (r>rad_planet_frac):
yr = yp + r*np.cos(theta)
zr = zp + r*np.sin(theta)
if(yr**2 + zr**2 < 1):
rad.append(r)
yrandom.append(yr)
zrandom.append(zr)
yrandom = np.array(yrandom)
zrandom = np.array(zrandom)
rad = np.array(rad)
weights = rad**(4/3.)
norm = np.sum(weights)
weights *= norm**-1
return yrandom, zrandom, weights
def generate_rays_weighted(Nr,slope,yp,zp,rad_planet_frac):
"""Nr is the number of radius bins from the planet, slope is the power law sampling (1=linear, <1 is centrally concentrated)"""
#rf = np.logspace(np.log10(rad_planet_frac),np.log10(1+np.sqrt(yp**2 + zp**2)),Nr) # faces of the rings
rf = np.linspace(rad_planet_frac**slope,(1+np.sqrt(yp**2 + zp**2))**slope,Nr)**(1/slope)
ra = (2/3)*(rf[1:]**3 - rf[0:-1]**3)/(rf[1:]**2 - rf[0:-1]**2) ## Area weighted centers
dr = rf[1:]-rf[0:-1]
rr = []
tt = []
da = []
for i in range(len(ra)):
Nth = int(np.round(2*np.pi*ra[i] / dr[i]))
th = np.linspace(0,2*np.pi,Nth+1)
th = 0.5*(th[1:]+th[0:-1])
dth = th[1]-th[0]
for j in range(Nth):
rr.append(ra[i])
tt.append(th[j])
da.append( np.pi*(rf[i+1]**2-rf[i]**2)/Nth )
rr = np.array(rr).flatten()
tt = np.array(tt).flatten()
da = np.array(da).flatten()
yrays = yp + rr*np.cos(tt)
zrays = zp + rr*np.sin(tt)
sel = np.sqrt(yrays**2 + zrays**2 ) < 1.0
yrays = yrays[sel].copy()
zrays = zrays[sel].copy()
da = da[sel].copy()
print("selected N=",len(yrays),"rays")
return yrays,zrays,da
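# Illustrative call (parameter values are arbitrary): sample rays that are
# centrally concentrated around a planet located at the stellar-disk centre.
#
# yrays, zrays, da = generate_rays_weighted(Nr=40, slope=0.5, yp=0.0, zp=0.0, rad_planet_frac=0.02)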
def sum_tau_LOS(ray):
nu_array = np.broadcast_to(nu, (len(ray['vx']), len(nu)))
vx_array = np.broadcast_to(ray['vx'], (len(nu), len(ray['vx']))).T
vy_array = np.broadcast_to(ray['vy'], (len(nu), len(ray['vx']))).T
nhe3_array = np.broadcast_to(ray['nhe3'], (len(nu), len(ray['vx']))).T
dl_array = np.broadcast_to(ray['dl'], (len(nu), len(ray['vx']))).T
da1_array = np.broadcast_to(ray['da1'], (len(nu), len(ray['vx']))).T
da2_array = np.broadcast_to(ray['da2'], (len(nu), len(ray['vx']))).T
da3_array = np.broadcast_to(ray['da3'], (len(nu), len(ray['vx']))).T
delta_u1 = np.copy(c.c*(nu_array-nu1)/nu1 + (vx_array *
np.cos(azim_angle) + vy_array*np.sin(azim_angle))*np.sign(x2))
xx1 = np.copy(delta_u1*nu1/c.c)
delta_u2 = np.copy(c.c*(nu_array-nu2)/nu2 + (vx_array *
np.cos(azim_angle) + vy_array*np.sin(azim_angle))*np.sign(x2))
xx2 = np.copy(delta_u2*nu2/c.c )
delta_u3 = np.copy(c.c*(nu_array-nu3)/nu3 + (vx_array *
np.cos(azim_angle) + vy_array*np.sin(azim_angle))*np.sign(x2))
xx3 = np.copy(delta_u3*nu3/c.c)
tauLOS1 = np.sum(nhe3_array*dl_array*cs1 *
Voigt(xx1, da1_array, natural_gamma), axis=0)
tauLOS2 = np.sum(nhe3_array*dl_array*cs2 *
Voigt(xx2, da2_array, natural_gamma), axis=0)
tauLOS3 = np.sum(nhe3_array*dl_array*cs3 *
Voigt(xx3, da3_array, natural_gamma), axis=0)
return tauLOS1, tauLOS2, tauLOS3
def MC_ray(dart):
""" computes sum of tau along LOS of a ray defined by integer 'dart' """
ydart = yrandom[dart]
zdart = zrandom[dart]
print('dart: ', dart, ydart, zdart)
ray = pw.get_ray(planet_pos=(x2, y2, z2),
ydart=ydart,
zdart=zdart,
azim_angle=azim_angle,
pol_angle=0.0,
rstar=rad_star,
rplanet=rp,
fstep=f_raystep,
inner_lim=in_lim,
outer_lim=out_lim)
#print(ray['l'])
#print("min dl / rp= ",np.min(ray['dl'])/rp )
# boolean, whether the ray intersects the planet
throughplanet = (np.amin( np.sqrt( (ray['x']-x2)**2 + (ray['y']-y2)**2 + (ray['z']-z2)**2 ) ) < rp)
ray['nhe3'] = nhe3_interp((ray['phi'], ray['theta'], ray['r']))
ray['vx'] = vx_interp((ray['phi'], ray['theta'], ray['r']))
ray['vy'] = vy_interp((ray['phi'], ray['theta'], ray['r']))
ray['vz'] = vz_interp((ray['phi'], ray['theta'], ray['r']))
ray['temp'] = temp_interp((ray['phi'], ray['theta'], ray['r']))
ray['da1'] = np.sqrt(2.0*np.log(2.0))*nu1 * \
np.sqrt(0.25*c.kB*ray['temp']/c.mp)/c.c
ray['da2'] = np.sqrt(2.0*np.log(2.0))*nu2 * \
np.sqrt(0.25*c.kB*ray['temp']/c.mp)/c.c
ray['da3'] = np.sqrt(2.0*np.log(2.0))*nu3 * \
np.sqrt(0.25*c.kB*ray['temp']/c.mp)/c.c
tauLOS1, tauLOS2, tauLOS3 = sum_tau_LOS(ray)
if throughplanet:
expfac = np.zeros_like(tauLOS1)
print ("...planet crossing ray!")
else:
expfac = np.exp(-tauLOS1 - tauLOS2 - tauLOS3)
return expfac
def make_plots(it_num, jcoord):
# temp
plt.figure(figsize=(8, 8))
plt.pcolormesh(d['x'][:, jcoord, :], d['y'][:, jcoord, :], np.log10(
d['temp'][:, jcoord, :]), cmap=plt.cm.Spectral, vmax=6, vmin=2,shading='auto')
plt.colorbar(label=r"T [K]")
plt.axis('equal')
plt.plot(x2, y2, 'w*')
plt.xlim(-lim/2-a, lim/2-a)
plt.ylim(-lim/2, lim/2)
plt.savefig(base_dir+'temp_'+str(it_num+1)+'.png')
plt.close()
# tau hydrogen
plt.figure(figsize=(8, 8))
plt.pcolormesh(d['x'][:, jcoord, :], d['y'][:, jcoord, :], np.log10(
tau_integral[:, jcoord, :]), cmap=plt.cm.magma, vmax=-2.0, vmin=-7.0,shading='auto')
plt.colorbar(label=r"$\tau$")
plt.axis('equal')
plt.plot(x2, y2, 'w*')
plt.xlim(-lim/2-a, lim/2-a)
plt.ylim(-lim/2, lim/2)
plt.savefig(base_dir+'tauHI_'+str(it_num+1)+'.png')
plt.close()
# tau helium singlet
plt.figure(figsize=(8, 8))
plt.pcolormesh(d['x'][:, jcoord, :], d['y'][:, jcoord, :], np.log10(
tau1_integral[:, jcoord, :]), cmap=plt.cm.magma, vmax=-2.0, vmin=-7.0,shading='auto')
plt.colorbar(label=r"$\tau1$")
plt.axis('equal')
plt.plot(x2, y2, 'w*')
plt.xlim(-lim/2-a, lim/2-a)
plt.ylim(-lim/2, lim/2)
plt.savefig(base_dir+'tauHe1_'+str(it_num+1)+'.png')
plt.close()
# tau helium triplet
plt.figure(figsize=(8, 8))
plt.pcolormesh(d['x'][:, jcoord, :], d['y'][:, jcoord, :], np.log10(
tau3_integral[:, jcoord, :]), cmap=plt.cm.magma, vmax=0.0, vmin=-7.0,shading='auto')
plt.colorbar(label=r"$\tau3$")
plt.axis('equal')
plt.plot(x2, y2, 'w*')
plt.xlim(-lim/2-a, lim/2-a)
plt.ylim(-lim/2, lim/2)
plt.savefig(base_dir+'tauHe3_'+str(it_num+1)+'.png')
plt.close()
# electron number density
plt.figure(figsize=(8, 8))
plt.pcolormesh(d['x'][:, jcoord, :], d['y'][:, jcoord, :], np.log10(
ne[:, jcoord, :]), cmap=plt.cm.magma, vmax=10.0, vmin=-5.0,shading='auto')
plt.colorbar(label=r"ne")
plt.axis('equal')
plt.plot(x2, y2, 'w*')
plt.xlim(-lim/2-a, lim/2-a)
plt.ylim(-lim/2, lim/2)
plt.savefig(base_dir+'ne_'+str(it_num+1)+'.png')
plt.close()
# H I number density
plt.figure(figsize=(8, 8))
plt.pcolormesh(d['x'][:, jcoord, :], d['y'][:, jcoord, :], np.log10(
nh1[:, jcoord, :]), cmap=plt.cm.magma, vmax=10.0, vmin=-5.0,shading='auto')
plt.colorbar(label=r"nh1")
plt.axis('equal')
plt.plot(x2, y2, 'w*')
plt.xlim(-lim/2-a, lim/2-a)
plt.ylim(-lim/2, lim/2)
plt.savefig(base_dir+'nh1_'+str(it_num+1)+'.png')
plt.close()
# He I singlet number density
plt.figure(figsize=(8, 8))
plt.pcolormesh(d['x'][:, jcoord, :], d['y'][:, jcoord, :], np.log10(
nhe1[:, jcoord, :]), cmap=plt.cm.magma, vmax=10.0, vmin=-5.0,shading='auto')
plt.colorbar(label=r"nhe1")
plt.axis('equal')
plt.plot(x2, y2, 'w*')
plt.xlim(-lim/2-a, lim/2-a)
plt.ylim(-lim/2, lim/2)
plt.savefig(base_dir+'nhe1_'+str(it_num+1)+'.png')
plt.close()
# He I triplet number density
plt.figure(figsize=(8, 8))
plt.pcolormesh(d['x'][:, jcoord, :], d['y'][:, jcoord, :], np.log10(
nhe3[:, jcoord, :]), cmap=plt.cm.magma, vmax=5.0, vmin=-7.0,shading='auto')
plt.colorbar(label=r"nhe3")
plt.axis('equal')
plt.plot(x2, y2, 'w*')
plt.xlim(-lim/2-a, lim/2-a)
plt.ylim(-lim/2, lim/2)
plt.savefig(base_dir+'nhe3_'+str(it_num+1)+'.png')
plt.close()
def make_side_plots(it_num, icoord):
# tau
plt.figure(figsize=(8, 8))
plt.pcolormesh(d['x'][icoord, :, :], d['z'][icoord, :, :], np.log10(
tau_integral[icoord, :, :]), cmap=plt.cm.magma, vmax=0.0, vmin=-7.0,shading='auto')
plt.colorbar(label=r"$\tau$")
plt.axis('equal')
plt.plot(x2, y2, 'w*')
plt.xlim(-lim/2-a, lim/2-a)
plt.ylim(-lim/2, lim/2)
plt.savefig(base_dir+'stau05_'+str(it_num+1)+'.png')
plt.close()
# H I number density
plt.figure(figsize=(8, 8))
plt.pcolormesh(d['x'][icoord, :, :], d['z'][icoord, :, :], np.log10(
nh1[icoord, :, :]), cmap=plt.cm.magma, vmax=10.0, vmin=-5.0,shading='auto')
plt.colorbar(label=r"nh1")
plt.axis('equal')
plt.plot(x2, y2, 'w*')
plt.xlim(-lim/2-a, lim/2-a)
plt.ylim(-lim/2, lim/2)
plt.savefig(base_dir+'snh05_'+str(it_num+1)+'.png')
plt.close()
def get_BC_prop(d):
bc_ind = np.argmin(d['rp'].flatten())
print("rp/Rp=", d['rp'].flatten()[bc_ind] / rp)
pp = d['press'].flatten()[bc_ind]
rhop = d['rho'].flatten()[bc_ind]
Bern = gamma/(gamma-1.0)*pp/rhop - c.G*m2/rp
print("Bern = ", Bern)
K = pp/rhop**gamma
print("K =", K)
lambda_planet = c.G*m2*rhop/(gamma*pp*rp)
print("lambda = ", lambda_planet)
mdot_est = np.pi*rhop * \
np.sqrt(c.G*m2*(rp*lambda_planet)**3)*np.exp(1.5-lambda_planet)
print("mdot_est =", mdot_est)
return Bern, K, lambda_planet, mdot_est
def parker_fv(v, r):
return v*np.exp(-0.5*v*v/(vS*vS))/vS - rS*rS*np.exp(-2.0*rS/r + 1.5)/(r*r)
def parker_frho(r, v):
return rhoS*np.exp(2.0*rS/r - 1.5 - 0.5*v*v/(vS*vS))
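# Illustrative sketch (not part of the original script): parker_fv can be solved
# for the wind speed at a given radius with the Newton root-finder imported above,
# assuming the sonic-point values rS, vS, rhoS and the planet radius rp have
# already been defined at module level.
#
# r_test = 5.0 * rp
# v_test = newton(parker_fv, 1.0e5, args=(r_test,))
# rho_test = parker_frho(r_test, v_test)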
def get_Parker_rho_v_func(r_out=1.e11, num=1000):
vguess = 1.e5
r_aux = np.linspace(1.0/(0.9*rp), 1/r_out, num)
r = 1.0/r_aux
res_v = | np.zeros(num) | numpy.zeros |
# MIT License
#
# Copyright (c) 2017 <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import matplotlib.pyplot as plt
import threading
from src.manager.DataManager import DataManager
from src.manager.RunManager import RunManager
from src.manager.PlotManager import PlotManager
from src.utils.Progressbar import Progressbar
from src.utils.ThreadedPause import ThreadedPause
class ScriptRunner:
def __init__(self, run_config, model_details):
self.run_config = run_config
self.model_details = model_details
# Format settings
line_length = 80
line = line_length * "-"
print(line)
# create the log directory helper
self.run_manager = RunManager(run_config)
print(line)
# Initialize the model
self.run_manager.init_model(model_details)
self.stats = self.run_manager.create_stats()
self.conf = self.run_manager.model_config
print(line)
# transformation parameters
run_config['in_length'] = self.run_manager.model_config['rec_num_layers']
run_config['out_length'] = self.run_manager.model_config['rec_num_layers_teacher_forcing'] \
+ self.run_manager.model_config['rec_num_layers_student_forcing']
# create the data manager and get the transformed data
loader = DataManager(run_config)
self.data_sets = loader.load_divided_data()
set_labels = run_config['set_labels']
# define the size of training and validation set
for set_index in range(len(self.data_sets)):
data = self.data_sets[set_index]
size = np.size(data[0], 2)
label = set_labels[set_index]
print("Size of Set {} is {}".format(size, label))
print(line)
# create model header
print("Model {} ({})".format(self.run_manager.model.name, self.conf['model_params']))
print(line)
# create progressbar
self.p_bar = Progressbar(self.conf['episodes'], line_length)
# status variables
self.update = False
self.semaphore = threading.Semaphore()
self.progress_count = self.run_manager.current_episode
self.stats.last_episode = self.run_manager.current_episode - 1
# obtain slices etc.
validation_set_index = self.run_config['validation_set_index']
train_set_index = self.run_config['train_set_index']
val_data = self.data_sets[validation_set_index]
train_data = self.data_sets[train_set_index]
num_vis_traj = self.run_config['num_visualized_results']
va_slices = np.random.permutation(np.size(val_data[0], 2))[:num_vis_traj]
tr_slices = np.random.permutation(np.size(train_data[0], 2))[:num_vis_traj]
self.all_in = np.concatenate((train_data[0][:, :, tr_slices], val_data[0][:, :, va_slices]), axis=2)
real_output = np.concatenate((train_data[1][:, :, tr_slices], val_data[1][:, :, va_slices]), axis=2)
pred_output = self.run_manager.model.predict(self.all_in)
self.plots = PlotManager(run_config['num_visualized_results'],
run_config['input_grouping'],
run_config['output_grouping'], self.all_in, real_output, pred_output)
# set interactive mode on
plt.ion()
plt.show()
if self.run_manager.current_episode > 0:
self.stats.plot()
self.plots.plot()
thread = threading.Thread(None, self.train, "Training Thread")
thread.start()
while thread.is_alive():
self.plot()
ThreadedPause.pause(2)
def train(self):
validation_set_index = self.run_config['validation_set_index']
train_set_index = self.run_config['train_set_index']
train_data = self.data_sets[train_set_index]
# set the range
for episode in range(self.run_manager.current_episode, self.conf['episodes']):
# execute as much episodes
for step in range(self.conf['steps_per_episode']):
# sample them randomly according to the batch size and train the model
slices = np.random.randint(0, | np.size(train_data[0], 2) | numpy.size |
###############################################################################
# actionAngle: a Python module to calculate actions, angles, and frequencies
#
# class: actionAngleIsochroneApprox
#
# Calculate actions-angle coordinates for any potential by using
# an isochrone potential as an approximate potential and using
# a Fox & Binney (2013?) + torus machinery-like algorithm
# (angle-fit) (Bovy 2014)
#
# methods:
# __call__: returns (jr,lz,jz)
# actionsFreqs: returns (jr,lz,jz,Or,Op,Oz)
# actionsFreqsAngles: returns (jr,lz,jz,Or,Op,Oz,ar,ap,az)
#
###############################################################################
import math
import warnings
import numpy as nu
import numpy.linalg as linalg
from scipy import optimize
from galpy.potential import dvcircdR, vcirc, _isNonAxi
from galpy.potential.Potential import flatten as flatten_potential
from .actionAngleIsochrone import actionAngleIsochrone
from .actionAngle import actionAngle
from galpy.potential import IsochronePotential, MWPotential
from galpy.util import bovy_plot, galpyWarning
from galpy.util.bovy_conversion import physical_conversion, \
potential_physical_input, time_in_Gyr
_TWOPI= 2.*nu.pi
_ANGLETOL= 0.02 #tolerance for deciding whether full angle range is covered
_APY_LOADED= True
try:
from astropy import units
except ImportError:
_APY_LOADED= False
class actionAngleIsochroneApprox(actionAngle):
"""Action-angle formalism using an isochrone potential as an approximate potential and using a Fox & Binney (2014?) like algorithm to calculate the actions using orbit integrations and a torus-machinery-like angle-fit to get the angles and frequencies (Bovy 2014)"""
def __init__(self,*args,**kwargs):
"""
NAME:
__init__
PURPOSE:
initialize an actionAngleIsochroneApprox object
INPUT:
Either:
b= scale parameter of the isochrone parameter (can be Quantity)
ip= instance of a IsochronePotential
aAI= instance of an actionAngleIsochrone
pot= potential to calculate action-angle variables for
tintJ= (default: 100) time to integrate orbits for to estimate actions (can be Quantity)
ntintJ= (default: 10000) number of time-integration points
integrate_method= (default: 'dopr54_c') integration method to use
dt= (None) orbit.integrate dt keyword (for fixed stepsize integration)
maxn= (default: 3) Default value for all methods when using a grid in vec(n) up to this n (zero-based)
ro= distance from vantage point to GC (kpc; can be Quantity)
vo= circular velocity at ro (km/s; can be Quantity)
OUTPUT:
instance
HISTORY:
2013-09-10 - Written - Bovy (IAS)
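EXAMPLE (illustrative sketch; assumes a Milky-Way-like potential such as
galpy.potential.MWPotential2014 is available):
>>> from galpy.potential import MWPotential2014
>>> aAIA= actionAngleIsochroneApprox(pot=MWPotential2014,b=0.8)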
"""
actionAngle.__init__(self,
ro=kwargs.get('ro',None),vo=kwargs.get('vo',None))
if not 'pot' in kwargs: #pragma: no cover
raise IOError("Must specify pot= for actionAngleIsochroneApprox")
self._pot= flatten_potential(kwargs['pot'])
if self._pot == MWPotential:
warnings.warn("Use of MWPotential as a Milky-Way-like potential is deprecated; galpy.potential.MWPotential2014, a potential fit to a large variety of dynamical constraints (see Bovy 2015), is the preferred Milky-Way-like potential in galpy",
galpyWarning)
if not 'b' in kwargs and not 'ip' in kwargs \
and not 'aAI' in kwargs: #pragma: no cover
raise IOError("Must specify b=, ip=, or aAI= for actionAngleIsochroneApprox")
if 'aAI' in kwargs:
if not isinstance(kwargs['aAI'],actionAngleIsochrone): #pragma: no cover
raise IOError("'Provided aAI= does not appear to be an instance of an actionAngleIsochrone")
self._aAI= kwargs['aAI']
elif 'ip' in kwargs:
ip= kwargs['ip']
if not isinstance(ip,IsochronePotential): #pragma: no cover
raise IOError("'Provided ip= does not appear to be an instance of an IsochronePotential")
self._aAI= actionAngleIsochrone(ip=ip)
else:
if _APY_LOADED and isinstance(kwargs['b'],units.Quantity):
b= kwargs['b'].to(units.kpc).value/self._ro
else:
b= kwargs['b']
self._aAI= actionAngleIsochrone(ip=IsochronePotential(b=b,
normalize=1.))
self._tintJ= kwargs.get('tintJ',100.)
if _APY_LOADED and isinstance(self._tintJ,units.Quantity):
self._tintJ= self._tintJ.to(units.Gyr).value\
/time_in_Gyr(self._vo,self._ro)
self._ntintJ= kwargs.get('ntintJ',10000)
self._integrate_dt= kwargs.get('dt',None)
self._tsJ= nu.linspace(0.,self._tintJ,self._ntintJ)
self._integrate_method= kwargs.get('integrate_method','dopr54_c')
self._maxn= kwargs.get('maxn',3)
self._c= False
ext_loaded= False
if ext_loaded and (('c' in kwargs and kwargs['c'])
or not 'c' in kwargs): #pragma: no cover
self._c= True
else:
self._c= False
# Check the units
self._check_consistent_units()
return None
def _evaluate(self,*args,**kwargs):
"""
NAME:
__call__ (_evaluate)
PURPOSE:
evaluate the actions (jr,lz,jz)
INPUT:
Either:
a) R,vR,vT,z,vz[,phi]:
1) floats: phase-space value for single object (phi is optional) (each can be a Quantity)
2) numpy.ndarray: [N] phase-space values for N objects (each can be a Quantity)
b) Orbit instance: initial condition used if that's it, orbit(t) if there is a time given as well as the second argument
cumul= if True, return the cumulative average actions (to look
at convergence)
OUTPUT:
(jr,lz,jz)
HISTORY:
2013-09-10 - Written - Bovy (IAS)
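EXAMPLE (illustrative sketch; aAIA constructed as in __init__ above, the
phase-space values are arbitrary placeholders):
>>> jr,lz,jz= aAIA(1.,0.1,1.1,0.1,0.02,0.5)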
"""
R,vR,vT,z,vz,phi= self._parse_args(False,False,*args)
if self._c: #pragma: no cover
pass
else:
#Use self._aAI to calculate the actions and angles in the isochrone potential
acfs= self._aAI._actionsFreqsAngles(R.flatten(),
vR.flatten(),
vT.flatten(),
z.flatten(),
vz.flatten(),
phi.flatten())
jrI= nu.reshape(acfs[0],R.shape)[:,:-1]
jzI= nu.reshape(acfs[2],R.shape)[:,:-1]
anglerI= nu.reshape(acfs[6],R.shape)
anglezI= nu.reshape(acfs[8],R.shape)
if nu.any((nu.fabs(nu.amax(anglerI,axis=1)-_TWOPI) > _ANGLETOL)\
*(nu.fabs(nu.amin(anglerI,axis=1)) > _ANGLETOL)): #pragma: no cover
warnings.warn("Full radial angle range not covered for at least one object; actions are likely not reliable",galpyWarning)
if nu.any((nu.fabs(nu.amax(anglezI,axis=1)-_TWOPI) > _ANGLETOL)\
*(nu.fabs(nu.amin(anglezI,axis=1)) > _ANGLETOL)): #pragma: no cover
warnings.warn("Full vertical angle range not covered for at least one object; actions are likely not reliable",galpyWarning)
danglerI= ((nu.roll(anglerI,-1,axis=1)-anglerI) % _TWOPI)[:,:-1]
danglezI= ((nu.roll(anglezI,-1,axis=1)-anglezI) % _TWOPI)[:,:-1]
if kwargs.get('cumul',False):
sumFunc= nu.cumsum
else:
sumFunc= nu.sum
jr= sumFunc(jrI*danglerI,axis=1)/sumFunc(danglerI,axis=1)
jz= sumFunc(jzI*danglezI,axis=1)/sumFunc(danglezI,axis=1)
if _isNonAxi(self._pot):
lzI= nu.reshape(acfs[1],R.shape)[:,:-1]
anglephiI= nu.reshape(acfs[7],R.shape)
danglephiI= ((nu.roll(anglephiI,-1,axis=1)-anglephiI) % _TWOPI)[:,:-1]
if nu.any((nu.fabs(nu.amax(anglephiI,axis=1)-_TWOPI) > _ANGLETOL)\
*(nu.fabs(nu.amin(anglephiI,axis=1)) > _ANGLETOL)): #pragma: no cover
warnings.warn("Full azimuthal angle range not covered for at least one object; actions are likely not reliable",galpyWarning)
lz= sumFunc(lzI*danglephiI,axis=1)/sumFunc(danglephiI,axis=1)
else:
lz= R[:,0]*vT[:,0]
return (jr,lz,jz)
def _actionsFreqs(self,*args,**kwargs):
"""
NAME:
actionsFreqs (_actionsFreqs)
PURPOSE:
evaluate the actions and frequencies (jr,lz,jz,Omegar,Omegaphi,Omegaz)
INPUT:
Either:
a) R,vR,vT,z,vz[,phi]:
1) floats: phase-space value for single object (phi is optional) (each can be a Quantity)
2) numpy.ndarray: [N] phase-space values for N objects (each can be a Quantity)
b) Orbit instance: initial condition used if that's it, orbit(t) if there is a time given as well as the second argument
maxn= (default: object-wide default) Use a grid in vec(n) up to this n (zero-based)
ts= if set, the phase-space points correspond to these times (IF NOT SET, WE ASSUME THAT ts IS THAT THAT IS ASSOCIATED WITH THIS OBJECT)
_firstFlip= (False) if True and Orbits are given, the backward part of the orbit is integrated first and stored in the Orbit object
OUTPUT:
(jr,lz,jz,Omegar,Omegaphi,Omegaz)
HISTORY:
2013-09-10 - Written - Bovy (IAS)
"""
acfs= self._actionsFreqsAngles(*args,**kwargs)
return (acfs[0],acfs[1],acfs[2],acfs[3],acfs[4],acfs[5])
def _actionsFreqsAngles(self,*args,**kwargs):
"""
NAME:
actionsFreqsAngles (_actionsFreqsAngles)
PURPOSE:
evaluate the actions, frequencies, and angles (jr,lz,jz,Omegar,Omegaphi,Omegaz,angler,anglephi,anglez)
INPUT:
Either:
a) R,vR,vT,z,vz[,phi]:
1) floats: phase-space value for single object (phi is optional) (each can be a Quantity)
2) numpy.ndarray: [N] phase-space values for N objects (each can be a Quantity)
b) Orbit instance: initial condition used if that's it, orbit(t) if there is a time given as well as the second argument
maxn= (default: object-wide default) Use a grid in vec(n) up to this n (zero-based)
ts= if set, the phase-space points correspond to these times (IF NOT SET, WE ASSUME THAT ts IS THAT THAT IS ASSOCIATED WITH THIS OBJECT)
_firstFlip= (False) if True and Orbits are given, the backward part of the orbit is integrated first and stored in the Orbit object
OUTPUT:
(jr,lz,jz,Omegar,Omegaphi,Omegaz,angler,anglephi,anglez)
HISTORY:
2013-09-10 - Written - Bovy (IAS)
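EXAMPLE (illustrative sketch; aAIA constructed as in __init__ above, the
phase-space values are arbitrary placeholders):
>>> jr,lz,jz,Or,Op,Oz,ar,ap,az= aAIA.actionsFreqsAngles(1.,0.1,1.1,0.1,0.02,0.5)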
"""
from galpy.orbit import Orbit
_firstFlip= kwargs.get('_firstFlip',False)
#If the orbit was already integrated, set ts to the integration times
if isinstance(args[0],Orbit) and hasattr(args[0]._orb,'orbit') \
and not 'ts' in kwargs:
kwargs['ts']= args[0]._orb.t
elif (isinstance(args[0],list) and isinstance(args[0][0],Orbit)) \
and hasattr(args[0][0]._orb,'orbit') \
and not 'ts' in kwargs:
kwargs['ts']= args[0][0]._orb.t
R,vR,vT,z,vz,phi= self._parse_args(True,_firstFlip,*args)
if 'ts' in kwargs and not kwargs['ts'] is None:
ts= kwargs['ts']
if _APY_LOADED and isinstance(ts,units.Quantity):
ts= ts.to(units.Gyr).value\
/time_in_Gyr(self._vo,self._ro)
else:
ts= nu.empty(R.shape[1])
ts[self._ntintJ-1:]= self._tsJ
ts[:self._ntintJ-1]= -self._tsJ[1:][::-1]
maxn= kwargs.get('maxn',self._maxn)
if self._c: #pragma: no cover
pass
else:
#Use self._aAI to calculate the actions and angles in the isochrone potential
if '_acfs' in kwargs: acfs= kwargs['_acfs']
else:
acfs= self._aAI._actionsFreqsAngles(R.flatten(),
vR.flatten(),
vT.flatten(),
z.flatten(),
vz.flatten(),
phi.flatten())
jrI= nu.reshape(acfs[0],R.shape)[:,:-1]
jzI= nu.reshape(acfs[2],R.shape)[:,:-1]
anglerI= nu.reshape(acfs[6],R.shape)
anglezI= nu.reshape(acfs[8],R.shape)
if nu.any((nu.fabs(nu.amax(anglerI,axis=1)-_TWOPI) > _ANGLETOL)\
*(nu.fabs(nu.amin(anglerI,axis=1)) > _ANGLETOL)): #pragma: no cover
warnings.warn("Full radial angle range not covered for at least one object; actions are likely not reliable",galpyWarning)
if nu.any((nu.fabs(nu.amax(anglezI,axis=1)-_TWOPI) > _ANGLETOL)\
*(nu.fabs(nu.amin(anglezI,axis=1)) > _ANGLETOL)): #pragma: no cover
warnings.warn("Full vertical angle range not covered for at least one object; actions are likely not reliable",galpyWarning)
danglerI= ((nu.roll(anglerI,-1,axis=1)-anglerI) % _TWOPI)[:,:-1]
danglezI= ((nu.roll(anglezI,-1,axis=1)-anglezI) % _TWOPI)[:,:-1]
jr= nu.sum(jrI*danglerI,axis=1)/nu.sum(danglerI,axis=1)
jz= nu.sum(jzI*danglezI,axis=1)/nu.sum(danglezI,axis=1)
if _isNonAxi(self._pot): #pragma: no cover
lzI= nu.reshape(acfs[1],R.shape)[:,:-1]
anglephiI= nu.reshape(acfs[7],R.shape)
if nu.any((nu.fabs(nu.amax(anglephiI,axis=1)-_TWOPI) > _ANGLETOL)\
*(nu.fabs(nu.amin(anglephiI,axis=1)) > _ANGLETOL)): #pragma: no cover
warnings.warn("Full azimuthal angle range not covered for at least one object; actions are likely not reliable",galpyWarning)
danglephiI= ((nu.roll(anglephiI,-1,axis=1)-anglephiI) % _TWOPI)[:,:-1]
lz= nu.sum(lzI*danglephiI,axis=1)/nu.sum(danglephiI,axis=1)
else:
lz= R[:,len(ts)//2]*vT[:,len(ts)//2]
#Now do an 'angle-fit'
angleRT= dePeriod(nu.reshape(acfs[6],R.shape))
acfs7= nu.reshape(acfs[7],R.shape)
negFreqIndx= nu.median(acfs7-nu.roll(acfs7,1,axis=1),axis=1) < 0. #anglephi is decreasing
anglephiT= nu.empty(acfs7.shape)
anglephiT[negFreqIndx,:]= dePeriod(_TWOPI-acfs7[negFreqIndx,:])
negFreqPhi= nu.zeros(R.shape[0],dtype='bool')
negFreqPhi[negFreqIndx]= True
anglephiT[True^negFreqIndx,:]= dePeriod(acfs7[True^negFreqIndx,:])
angleZT= dePeriod(nu.reshape(acfs[8],R.shape))
#Write the angle-fit as Y=AX, build A and Y
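#Roughly, each auxiliary angle is fit as theta^A(t) ~ theta(0) + Omega*t
#+ sum_n S_n sin(n . theta^A(t)), so A gets a constant column, a time
#column, and one sine column per retained integer vector n; the
#least-squares solve below then yields the angle offsets (row 0) and
#frequencies (row 1) directly.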
nt= len(ts)
no= R.shape[0]
#remove 0,0,0 and half-plane
if _isNonAxi(self._pot):
nn= (2*maxn-1)**2*maxn-(maxn-1)*(2*maxn-1)-maxn
else:
nn= maxn*(2*maxn-1)-maxn
A= nu.zeros((no,nt,2+nn))
A[:,:,0]= 1.
A[:,:,1]= ts
#sorting the phi and Z grids this way makes it easy to exclude the origin
phig= list(nu.arange(-maxn+1,maxn,1))
phig.sort(key = lambda x: abs(x))
phig= nu.array(phig,dtype='int')
if _isNonAxi(self._pot):
grid= nu.meshgrid(nu.arange(maxn),phig,phig)
else:
grid= nu.meshgrid(nu.arange(maxn),phig)
gridR= grid[0].T.flatten()[1:] #remove 0,0,0
gridZ= grid[1].T.flatten()[1:]
mask = nu.ones(len(gridR),dtype=bool)
# excludes axis that is not in half-space
if _isNonAxi(self._pot):
gridphi= grid[2].T.flatten()[1:]
mask= True\
^(gridR == 0)*((gridphi < 0)+((gridphi==0)*(gridZ < 0)))
else:
mask[:2*maxn-3:2]= False
gridR= gridR[mask]
gridZ= gridZ[mask]
tangleR= nu.tile(angleRT.T,(nn,1,1)).T
tgridR= nu.tile(gridR,(no,nt,1))
tangleZ= nu.tile(angleZT.T,(nn,1,1)).T
tgridZ= nu.tile(gridZ,(no,nt,1))
if _isNonAxi(self._pot):
gridphi= gridphi[mask]
tgridphi= nu.tile(gridphi,(no,nt,1))
tanglephi= nu.tile(anglephiT.T,(nn,1,1)).T
sinnR= nu.sin(tgridR*tangleR+tgridphi*tanglephi+tgridZ*tangleZ)
else:
sinnR= nu.sin(tgridR*tangleR+tgridZ*tangleZ)
A[:,:,2:]= sinnR
#Matrix magic
atainv= nu.empty((no,2+nn,2+nn))
AT= nu.transpose(A,axes=(0,2,1))
for ii in range(no):
atainv[ii,:,:,]= linalg.inv(nu.dot(AT[ii,:,:],A[ii,:,:]))
ATAR= nu.sum(AT*nu.transpose(nu.tile(angleRT,(2+nn,1,1)),axes=(1,0,2)),axis=2)
ATAT= nu.sum(AT*nu.transpose(nu.tile(anglephiT,(2+nn,1,1)),axes=(1,0,2)),axis=2)
ATAZ= nu.sum(AT*nu.transpose(nu.tile(angleZT,(2+nn,1,1)),axes=(1,0,2)),axis=2)
angleR= nu.sum(atainv[:,0,:]*ATAR,axis=1)
OmegaR= nu.sum(atainv[:,1,:]*ATAR,axis=1)
anglephi= nu.sum(atainv[:,0,:]*ATAT,axis=1)
Omegaphi= nu.sum(atainv[:,1,:]*ATAT,axis=1)
angleZ= nu.sum(atainv[:,0,:]*ATAZ,axis=1)
OmegaZ= nu.sum(atainv[:,1,:]*ATAZ,axis=1)
Omegaphi[negFreqIndx]= -Omegaphi[negFreqIndx]
anglephi[negFreqIndx]= _TWOPI-anglephi[negFreqIndx]
if kwargs.get('_retacfs',False):
return (jr,lz,jz,OmegaR,Omegaphi,OmegaZ, #pragma: no cover
angleR % _TWOPI,
anglephi % _TWOPI,
angleZ % _TWOPI,acfs)
else:
return (jr,lz,jz,OmegaR,Omegaphi,OmegaZ,
angleR % _TWOPI,
anglephi % _TWOPI,
angleZ % _TWOPI)
def plot(self,*args,**kwargs):
"""
NAME:
plot
PURPOSE:
plot the angles vs. each other, to check whether the isochrone
approximation is good
INPUT:
Either:
a) R,vR,vT,z,vz:
floats: phase-space value for single object
b) Orbit instance
type= ('araz') type of plot to make
a) 'araz': az vs. ar, with color-coded aphi
b) 'araphi': aphi vs. ar, with color-coded az
c) 'azaphi': aphi vs. az, with color-coded ar
d) 'jr': cumulative average of jr with time, to assess convergence
e) 'lz': same as 'jr' but for lz
f) 'jz': same as 'jr' but for jz
deperiod= (False), if True, de-period the angles
downsample= (False) if True, downsample what's plotted to 400 points
+plot kwargs
OUTPUT:
plot to output
HISTORY:
2013-09-10 - Written - Bovy (IAS)
"""
#Kwargs
type= kwargs.pop('type','araz')
deperiod= kwargs.pop('deperiod',False)
downsample= kwargs.pop('downsample',False)
#Parse input
R,vR,vT,z,vz,phi= self._parse_args('a' in type,False,*args)
#Use self._aAI to calculate the actions and angles in the isochrone potential
acfs= self._aAI._actionsFreqsAngles(R.flatten(),
vR.flatten(),
vT.flatten(),
z.flatten(),
vz.flatten(),
phi.flatten())
if type == 'jr' or type == 'lz' or type == 'jz':
jrI= nu.reshape(acfs[0],R.shape)[:,:-1]
jzI= nu.reshape(acfs[2],R.shape)[:,:-1]
anglerI= nu.reshape(acfs[6],R.shape)
anglezI= nu.reshape(acfs[8],R.shape)
danglerI= ((nu.roll(anglerI,-1,axis=1)-anglerI) % _TWOPI)[:,:-1]
danglezI= ((nu.roll(anglezI,-1,axis=1)-anglezI) % _TWOPI)[:,:-1]
if True:
sumFunc= nu.cumsum
jr= sumFunc(jrI*danglerI,axis=1)/sumFunc(danglerI,axis=1)
jz= sumFunc(jzI*danglezI,axis=1)/sumFunc(danglezI,axis=1)
lzI= nu.reshape(acfs[1],R.shape)[:,:-1]
anglephiI= nu.reshape(acfs[7],R.shape)
danglephiI= ((nu.roll(anglephiI,-1,axis=1)-anglephiI) % _TWOPI)[:,:-1]
lz= sumFunc(lzI*danglephiI,axis=1)/sumFunc(danglephiI,axis=1)
from galpy.orbit import Orbit
if isinstance(args[0],Orbit) and hasattr(args[0]._orb,'t'):
ts= args[0]._orb.t[:-1]
else:
ts= self._tsJ[:-1]
if type == 'jr':
if downsample:
plotx= ts[::int(round(self._ntintJ//400))]
ploty= jr[0,::int(round(self._ntintJ//400))]/jr[0,-1]
plotz= anglerI[0,:-1:int(round(self._ntintJ//400))]
else:
plotx= ts
ploty= jr[0,:]/jr[0,-1]
plotz= anglerI[0,:-1]
bovy_plot.bovy_plot(plotx,ploty,
c=plotz,
s=20.,
scatter=True,
edgecolor='none',
xlabel=r'$t$',
ylabel=r'$J^A_R / \langle J^A_R \rangle$',
clabel=r'$\theta^A_R$',
vmin=0.,vmax=2.*nu.pi,
crange=[0.,2.*nu.pi],
colorbar=True,
**kwargs)
elif type == 'lz':
if downsample:
plotx= ts[::int(round(self._ntintJ//400))]
ploty= lz[0,::int(round(self._ntintJ//400))]/lz[0,-1]
plotz= anglephiI[0,:-1:int(round(self._ntintJ//400))]
else:
plotx= ts
ploty= lz[0,:]/lz[0,-1]
plotz= anglephiI[0,:-1]
bovy_plot.bovy_plot(plotx,ploty,c=plotz,s=20.,
scatter=True,
edgecolor='none',
xlabel=r'$t$',
ylabel=r'$L^A_Z / \langle L^A_Z \rangle$',
clabel=r'$\theta^A_\phi$',
vmin=0.,vmax=2.*nu.pi,
crange=[0.,2.*nu.pi],
colorbar=True,
**kwargs)
elif type == 'jz':
if downsample:
plotx= ts[::int(round(self._ntintJ//400))]
ploty= jz[0,::int(round(self._ntintJ//400))]/jz[0,-1]
plotz= anglezI[0,:-1:int(round(self._ntintJ//400))]
else:
plotx= ts
ploty= jz[0,:]/jz[0,-1]
plotz= anglezI[0,:-1]
bovy_plot.bovy_plot(plotx,ploty,c=plotz,s=20.,
scatter=True,
edgecolor='none',
xlabel=r'$t$',
ylabel=r'$J^A_Z / \langle J^A_Z \rangle$',
clabel=r'$\theta^A_Z$',
vmin=0.,vmax=2.*nu.pi,
crange=[0.,2.*nu.pi],
colorbar=True,
**kwargs)
else:
if deperiod:
if 'ar' in type:
angleRT= dePeriod(nu.reshape(acfs[6],R.shape))
else:
angleRT= nu.reshape(acfs[6],R.shape)
if 'aphi' in type:
acfs7= nu.reshape(acfs[7],R.shape)
negFreqIndx= nu.median(acfs7-nu.roll(acfs7,1,axis=1),axis=1) < 0. #anglephi is decreasing
anglephiT= nu.empty(acfs7.shape)
anglephiT[negFreqIndx,:]= dePeriod(_TWOPI-acfs7[negFreqIndx,:])
negFreqPhi= nu.zeros(R.shape[0],dtype='bool')
negFreqPhi[negFreqIndx]= True
anglephiT[True^negFreqIndx,:]= dePeriod(acfs7[True^negFreqIndx,:])
else:
anglephiT= nu.reshape(acfs[7],R.shape)
if 'az' in type:
angleZT= dePeriod(nu.reshape(acfs[8],R.shape))
else:
angleZT= nu.reshape(acfs[8],R.shape)
xrange= None
yrange= None
else:
angleRT= nu.reshape(acfs[6],R.shape)
anglephiT= nu.reshape(acfs[7],R.shape)
angleZT= nu.reshape(acfs[8],R.shape)
xrange= [-0.5,2.*nu.pi+0.5]
yrange= [-0.5,2.*nu.pi+0.5]
vmin, vmax= 0.,2.*nu.pi
crange= [vmin,vmax]
if type == 'araz':
if downsample:
plotx= angleRT[0,::int(round(self._ntintJ//400))]
ploty= angleZT[0,::int(round(self._ntintJ//400))]
plotz= anglephiT[0,::int(round(self._ntintJ//400))]
else:
plotx= angleRT[0,:]
ploty= angleZT[0,:]
plotz= anglephiT[0,:]
bovy_plot.bovy_plot(plotx,ploty,c=plotz,s=20.,
scatter=True,
edgecolor='none',
xlabel=r'$\theta^A_R$',
ylabel=r'$\theta^A_Z$',
clabel=r'$\theta^A_\phi$',
xrange=xrange,yrange=yrange,
vmin=vmin,vmax=vmax,
crange=crange,
colorbar=True,
**kwargs)
elif type == 'araphi':
if downsample:
plotx= angleRT[0,::int(round(self._ntintJ//400))]
ploty= anglephiT[0,::int(round(self._ntintJ//400))]
plotz= angleZT[0,::int(round(self._ntintJ//400))]
else:
plotx= angleRT[0,:]
ploty= anglephiT[0,:]
plotz= angleZT[0,:]
bovy_plot.bovy_plot(plotx,ploty,c=plotz,s=20.,
scatter=True,
edgecolor='none',
xlabel=r'$\theta^A_R$',
clabel=r'$\theta^A_Z$',
ylabel=r'$\theta^A_\phi$',
xrange=xrange,yrange=yrange,
vmin=vmin,vmax=vmax,
crange=crange,
colorbar=True,
**kwargs)
elif type == 'azaphi':
if downsample:
plotx= angleZT[0,::int(round(self._ntintJ//400))]
ploty= anglephiT[0,::int(round(self._ntintJ//400))]
plotz= angleRT[0,::int(round(self._ntintJ//400))]
else:
plotx= angleZT[0,:]
ploty= anglephiT[0,:]
plotz= angleRT[0,:]
bovy_plot.bovy_plot(plotx,ploty,c=plotz,s=20.,
scatter=True,
edgecolor='none',
clabel=r'$\theta^A_R$',
xlabel=r'$\theta^A_Z$',
ylabel=r'$\theta^A_\phi$',
xrange=xrange,yrange=yrange,
vmin=vmin,vmax=vmax,
crange=crange,
colorbar=True,
**kwargs)
return None
def _parse_args(self,freqsAngles=True,_firstFlip=False,*args):
"""Helper function to parse the arguments to the __call__ and actionsFreqsAngles functions"""
from galpy.orbit import Orbit
RasOrbit= False
integrated= True #whether the orbit was already integrated when given
if len(args) == 5 or len(args) == 3: #pragma: no cover
raise IOError("Must specify phi for actionAngleIsochroneApprox")
if len(args) == 6 or len(args) == 4:
if len(args) == 6:
R,vR,vT, z, vz, phi= args
else:
R,vR,vT, phi= args
z, vz= 0., 0.
if isinstance(R,float):
os= [Orbit([R,vR,vT,z,vz,phi])]
RasOrbit= True
integrated= False
elif len(R.shape) == 1: #not integrated yet
os= [Orbit([R[ii],vR[ii],vT[ii],z[ii],vz[ii],phi[ii]]) for ii in range(R.shape[0])]
RasOrbit= True
integrated= False
if isinstance(args[0],Orbit) \
or (isinstance(args[0],list) and isinstance(args[0][0],Orbit)) \
or RasOrbit:
if RasOrbit:
pass
elif not isinstance(args[0],list):
os= [args[0]]
if len(os[0]._orb.vxvv) == 3 or len(os[0]._orb.vxvv) == 5: #pragma: no cover
raise IOError("Must specify phi for actionAngleIsochroneApprox")
else:
os= args[0]
if len(os[0]._orb.vxvv) == 3 or len(os[0]._orb.vxvv) == 5: #pragma: no cover
raise IOError("Must specify phi for actionAngleIsochroneApprox")
self._check_consistent_units_orbitInput(os[0])
if not hasattr(os[0]._orb,'orbit'): #not integrated yet
if _firstFlip:
for o in os:
o._orb.vxvv[1]= -o._orb.vxvv[1]
o._orb.vxvv[2]= -o._orb.vxvv[2]
o._orb.vxvv[4]= -o._orb.vxvv[4]
[o.integrate(self._tsJ,pot=self._pot,
method=self._integrate_method,
dt=self._integrate_dt) for o in os]
if _firstFlip:
for o in os:
o._orb.vxvv[1]= -o._orb.vxvv[1]
o._orb.vxvv[2]= -o._orb.vxvv[2]
o._orb.vxvv[4]= -o._orb.vxvv[4]
o._orb.orbit[:,1]= -o._orb.orbit[:,1]
o._orb.orbit[:,2]= -o._orb.orbit[:,2]
o._orb.orbit[:,4]= -o._orb.orbit[:,4]
integrated= False
ntJ= os[0].getOrbit().shape[0]
no= len(os)
R= nu.empty((no,ntJ))
vR= nu.empty((no,ntJ))
vT= nu.empty((no,ntJ))
z= nu.zeros((no,ntJ))+10.**-7. #To avoid numpy warnings for
vz= nu.zeros((no,ntJ))+10.**-7. #planarOrbits
phi= nu.empty((no,ntJ))
for ii in range(len(os)):
this_orbit= os[ii].getOrbit()
R[ii,:]= this_orbit[:,0]
vR[ii,:]= this_orbit[:,1]
vT[ii,:]= this_orbit[:,2]
if this_orbit.shape[1] == 6:
z[ii,:]= this_orbit[:,3]
vz[ii,:]= this_orbit[:,4]
phi[ii,:]= this_orbit[:,5]
else:
phi[ii,:]= this_orbit[:,3]
if freqsAngles and not integrated: #also integrate backwards in time, such that the requested point is not at the edge
no= R.shape[0]
nt= R.shape[1]
oR= nu.empty((no,2*nt-1))
import numpy as np
import pytest
import sklearn
from autogluon.core.metrics import confusion_matrix, log_loss, quadratic_kappa
from autogluon.core.metrics.softclass_metrics import soft_log_loss
def test_confusion_matrix_with_valid_inputs_without_labels_and_weights():
# Given
input_solution = [2, 0, 2, 2, 0, 1]
input_prediction = [0, 0, 2, 2, 0, 2]
expected_output = np.array([[2, 0, 0], [0, 0, 1], [1, 0, 2]])
# When
observed_output = confusion_matrix(input_solution, input_prediction)
# Then
assert(np.array_equal(expected_output, observed_output))
# =======================================================================================
# =======================================================================================
import numpy as np
import sys
import getopt
import code # For development: code.interact(local=locals())
from datetime import datetime
from matplotlib.dates import date2num, num2date
import csv
from scipy.io import netcdf
import matplotlib.pyplot as plt
from calendar import monthrange
# =======================================================================================
# Parameters
# =======================================================================================
simple_verify_ts = False
simple_verify_means = False
epsilon = 0.622 # Ratio of gas constants vapor/dry air [g/g]
e_0 = 0.611 # saturation vapor pressure at 0C Clausius-Clapeyron [kPa]
L_vap = 2.5*10.0**6 # Latent heat of vaporization [J/kg]
R_vap = 461.0 # gas constant for water vapor [J/Kg/K]
T_0 = 273.0 # Temperature at freezing point of water [K]
d_per_mo = [31,28,31,30,31,30,31,31,30,31,30,31] # Days per month (NO LEAP YEAR)
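# Illustrative sketch (not used below): the constants above combine through
# the Clausius-Clapeyron relation to give a saturation vapor pressure [kPa]
# for a temperature t_k given in Kelvin.
def saturation_vapor_pressure_sketch(t_k):
    # e_sat(T) = e_0 * exp( (L_vap / R_vap) * (1/T_0 - 1/T) )
    return e_0 * np.exp((L_vap / R_vap) * (1.0 / T_0 - 1.0 / t_k))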
# =======================================================================================
# Classes and Types
# =======================================================================================
class ctrltype:
# This holds control parameter specified in XML
def __init__(self,csv_file,time_res_sec_in,time_res_sec_out,n_header_row,n_fields, \
nodata_flags,grid_name_out,fill_value,missing_value, \
acknowledge,history,date_format,time_format,timestamp_type,utc_offset):
self.csv_file = csv_file
self.time_res_sec_in = time_res_sec_in
self.time_res_sec_out = time_res_sec_out
self.n_header_row = n_header_row
self.n_fields = n_fields
self.nodata_flags = nodata_flags
self.grid_name_out = grid_name_out
self.fill_value = fill_value
self.missing_value = missing_value
self.timestamp_type = timestamp_type
self.acknowledge = acknowledge
self.history = history
self.date_format = date_format
self.time_format = time_format
self.utc_offset = utc_offset
class contype:
# This holds constants specified in XML (Like Lat/lon)
def __init__(self,name,long_name,units,mode,value,dims):
self.name = name
self.long_name = long_name
self.units = units
self.mode = mode
self.value = float(value)
self.dims = int(dims)
class vartype:
# This holds time dependent variables specified in XML/CSV (like temp,etc)
def __init__(self,name,long_name,units,mode,col_id,unit_mult,unit_off):
self.name = name
self.long_name = long_name
self.units = units
self.mode = mode
self.col_id = col_id
self.unit_mult = unit_mult
self.unit_off = unit_off
# These are for generating diagnostic plots
self.d_mean = np.zeros((24,3))
self.d_mean[:,0] = 100000.0
self.m_mean = np.zeros((12,3))
# @Time : 2020/10/19
# @Author : <NAME>
# @Email : <EMAIL>
# UPDATE
# @Time : 2021/7/9
# @Author : <NAME>
# @Email : <EMAIL>
"""
recbole.data.customized_dataset
##################################
We only recommend building customized datasets by inheriting.
Customized datasets named ``[Model Name]Dataset`` can be automatically called.
"""
from collections import defaultdict
import copy
import numpy as np
import torch
from recbole.data import get_dataloader
from recbole.data.dataset import KGSeqDataset, SequentialDataset
from recbole.data.interaction import Interaction
from recbole.sampler import SeqSampler
from recbole.sampler.sampler import MetaSeqSampler
from recbole.utils.enum_type import FeatureType, FeatureSource
class GRU4RecKGDataset(KGSeqDataset):
def __init__(self, config):
super().__init__(config)
class KSRDataset(KGSeqDataset):
def __init__(self, config):
super().__init__(config)
class DIENDataset(SequentialDataset):
""":class:`DIENDataset` is based on :class:`~recbole.data.dataset.sequential_dataset.SequentialDataset`.
It is different from :class:`SequentialDataset` in `data_augmentation`.
It add users' negative item list to interaction.
The original version of negative item list sampling was implemented by <NAME> (<EMAIL>) on 2021/2/25,
and updated on 2021/3/19. On 2021/7/9, Yupeng refactored SequentialDataset & SequentialDataLoader,
and then refactored DIENDataset as well.
Attributes:
augmentation (bool): Whether the interactions should be augmented in RecBole.
seq_sample (recbole.sampler.SeqSampler): A sampler used to sample negative item sequence.
neg_item_list_field (str): Field name for negative item sequence.
neg_item_list (torch.tensor): all users' negative item history sequence.
"""
def __init__(self, config):
super().__init__(config)
list_suffix = config['LIST_SUFFIX']
neg_prefix = config['NEG_PREFIX']
self.seq_sampler = SeqSampler(self)
self.neg_item_list_field = neg_prefix + self.iid_field + list_suffix
self.neg_item_list = self.seq_sampler.sample_neg_sequence(self.inter_feat[self.iid_field])
def data_augmentation(self):
"""Augmentation processing for sequential dataset.
E.g., ``u1`` has purchase sequence ``<i1, i2, i3, i4>``,
then after augmentation, we will generate three cases.
``u1, <i1> | i2``
(Which means given user_id ``u1`` and item_seq ``<i1>``,
we need to predict the next item ``i2``.)
The other cases are below:
``u1, <i1, i2> | i3``
``u1, <i1, i2, i3> | i4``
"""
self.logger.debug('data_augmentation')
self._aug_presets()
self._check_field('uid_field', 'time_field')
max_item_list_len = self.config['MAX_ITEM_LIST_LENGTH']
self.sort(by=[self.uid_field, self.time_field], ascending=True)
last_uid = None
uid_list, item_list_index, target_index, item_list_length = [], [], [], []
seq_start = 0
for i, uid in enumerate(self.inter_feat[self.uid_field].numpy()):
if last_uid != uid:
last_uid = uid
seq_start = i
else:
if i - seq_start > max_item_list_len:
seq_start += 1
uid_list.append(uid)
item_list_index.append(slice(seq_start, i))
target_index.append(i)
item_list_length.append(i - seq_start)
uid_list = np.array(uid_list)
item_list_index = np.array(item_list_index)
target_index = np.array(target_index)
""" Defines the ColorMapper and ColorMapTemplate classes.
"""
# Major library imports
from types import IntType, FloatType
from numpy import arange, array, asarray, clip, divide, float32, int8, isinf, \
isnan, ones, searchsorted, sometrue, sort, take, uint8, where, zeros, \
linspace, ones_like
# Enthought library imports
from traits.api import Any, Array, Bool, Dict, Event, Float, HasTraits, \
Int, Property, Str, Trait
# Relative imports
from abstract_colormap import AbstractColormap
from data_range_1d import DataRange1D
from speedups import map_colors
class ColorMapTemplate(HasTraits):
"""
A class representing the state of a ColorMapper, for use when persisting
plots.
"""
# The segment data of the color map.
segment_map = Any
# The number of steps in the color map.
steps = Int(256)
# Low end of the color map range.
range_low_setting = Trait('auto', 'auto', Float)
# High end of the color map range.
range_high_setting = Trait('auto', 'auto', Float)
def __init__(self, colormap=None, **kwtraits):
"""
Creates this template from a color map instance or creates an empty
template.
"""
if colormap:
self.from_colormap(colormap)
return
def from_colormap(self, colormap):
""" Populates this template from a color map.
"""
self.segment_map = colormap._segmentdata.copy()
self.steps = colormap.steps
self.range_low_setting = colormap.range.low_setting
self.range_high_setting = colormap.range.high_setting
return
def to_colormap(self, range=None):
""" Returns a ColorMapper instance from this template.
"""
colormap = ColorMapper(self.segment_map, steps = self.steps)
if range:
colormap.range = range
else:
colormap.range = DataRange1D(low = self.range_low_setting,
high = self.range_high_setting)
return colormap
class ColorMapper(AbstractColormap):
""" Represents a simple band-of-colors style of color map.
The look-up transfer function is a simple linear function between defined
intensities. There is no limit to the number of steps that can be
defined. If the segment intervals contain very few array
locations, quantization errors will occur.
Construction of a ColorMapper can be done through the factory methods
from_palette_array() and from_segment_map(). Do not make direct calls to the
ColorMapper constructor.
"""
# The color table.
color_bands = Property(Array)
# The total number of color steps in the map.
steps = Int(256)
# The name of this color map.
name = Str
# Not used.
low_pos = None
# Not used.
high_pos = None
# A generic "update" event that generally means that anything that relies
# on this mapper for visual output should do a redraw or repaint.
updated = Event
# Are the mapping arrays out of date?
_dirty = Bool(True)
# The raw segment data for creating the mapping array.
_segmentdata = Dict # (Str, Tuple | List)
#------------------------------------------------------------------------
# Static methods.
#------------------------------------------------------------------------
@classmethod
def from_palette_array(cls, palette, **traits):
""" Creates a ColorMapper from a palette array.
The palette colors are linearly interpolated across the range of
mapped values.
The *palette* parameter is a Nx3 or Nx4 array of intensity values, where
N > 1::
[[R0, G0, B0], ... [R(N-1), G(N-1), B(N-1)]]
[[R0, G0, B0, A0], ... [R(N-1), G(N-1), B(N-1), A(N-1)]]
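Example (illustrative sketch; assumes intensities in [0, 1] as used
elsewhere in this module)::

    cmap = ColorMapper.from_palette_array(
        [[0.0, 0.0, 1.0], [1.0, 1.0, 1.0], [1.0, 0.0, 0.0]])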
"""
palette = asarray(palette)
n_colors, n_components = palette.shape
if n_colors < 2:
raise ValueError("Palette must contain at least two colors.")
if n_components not in (3,4):
raise ValueError("Palette must be of RGB or RGBA colors. "
"Got %s color components." % n_components)
# Compute the % offset for each of the color locations.
offsets = linspace(0.0, 1.0, n_colors)
# From the offsets and the color data, generate a segment map.
segment_map = {}
red_values = palette[:,0]
segment_map['red'] = zip(offsets, red_values, red_values)
green_values = palette[:,1]
segment_map['green'] = zip(offsets, green_values, green_values)
blue_values = palette[:,2]
segment_map['blue'] = zip(offsets, blue_values, blue_values)
if n_components == 3:
alpha_values = ones(n_colors)
else:
alpha_values = palette[:,3]
segment_map['alpha'] = zip(offsets, alpha_values, alpha_values)
return cls(segment_map, **traits)
@classmethod
def from_segment_map(cls, segment_map, **traits):
""" Creates a Colormapper from a segment map.
The *segment_map* parameter is a dictionary with 'red', 'green', and
'blue' (and optionally 'alpha') entries. Each entry is a list of
(x, y0, y1) tuples:
* x: an offset in [0..1] (offsets within the list must be in ascending order)
* y0: value for the color channel for values less than or equal to x
* y1: value for the color channel for values greater than x
When a data value gets mapped to a color, it will be normalized to be
within [0..1]. For each RGB(A) component, the two adjacent values will
be found in the segment_map. The mapped component value will be found by
linearly interpolating the two values.
Generally, y0==y1. Colormaps with sharp transitions will have y0!=y1 at
the transitions.
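Example (illustrative sketch): a simple black-to-white ramp::

    smap = {'red':   [(0.0, 0.0, 0.0), (1.0, 1.0, 1.0)],
            'green': [(0.0, 0.0, 0.0), (1.0, 1.0, 1.0)],
            'blue':  [(0.0, 0.0, 0.0), (1.0, 1.0, 1.0)]}
    cmap = ColorMapper.from_segment_map(smap)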
"""
if 'alpha' not in segment_map:
segment_map = segment_map.copy()
segment_map['alpha'] = [(0.0, 1.0, 1.0), (1.0, 1.0, 1.0)]
return cls(segment_map, **traits)
@classmethod
def from_file(cls, filename, **traits):
""" Creates a ColorMapper from a file.
The *filename* parameter is the name of a file whose lines each contain
4 or 5 float values between 0.0 and 1.0. The first value is an offset in
the range [0..1], and the remaining 3 or 4 values are red, green, blue,
and optionally alpha values for the color corresponding to that offset.
The first line is assumed to contain the name of the colormap.
"""
colormap_file = open(filename, 'r')
lines = colormap_file.readlines()
colormap_file.close()
rgba_arr = [[],[],[],[]]
for line in lines[1:]:
strvalues = line.strip().split()
values = [float32(value) for value in strvalues]
if len(values) > 4:
channels = (0,1,2,3)
else:
channels = (0,1,2)
for i in channels:
channeltuple = (values[0], values[i+1], values[i+1])
rgba_arr[i].append(channeltuple)
# Alpha is frequently unspecified.
if len(rgba_arr[-1]) == 0:
rgba_arr[-1] = [(0.0, 1.0, 1.0), (1.0, 1.0, 1.0)]
if 'name' not in traits:
# Don't override a name supplied by the caller.
traits['name'] = lines[0].strip()
rgba_dict = {
'red': rgba_arr[0],
'green': rgba_arr[1],
'blue': rgba_arr[2],
'alpha': rgba_arr[3],
}
return cls(rgba_dict, **traits)
#------------------------------------------------------------------------
# Public methods
#------------------------------------------------------------------------
def __init__(self, segmentdata, **kwtraits):
""" Creates a Colormapper from a segment map.
The *segment_map* parameter is a dictionary with 'red', 'green', and
'blue' (and optionally 'alpha') entries. Each entry is a list of
(x, y0, y1) tuples:
* x: an offset in [0..1] (offsets within the list must be in ascending order)
* y0: value for the color channel for values less than or equal to x
* y1: value for the color channel for values greater than x
When a data value gets mapped to a color, it will be normalized to be
within [0..1]. For each RGB(A) component, the two adjacent values will
be found in the segment_map. The mapped component value will be found by
linearly interpolating the two values.
Generally, y0==y1. Colormaps with sharp transitions will have y0!=y1 at
the transitions.
"""
self._segmentdata = segmentdata
super(ColorMapper, self).__init__(**kwtraits)
return
def map_screen(self, data_array):
""" Maps an array of data values to an array of colors.
"""
if self._dirty:
self._recalculate()
rgba = map_colors(data_array, self.steps, self.range.low,
self.range.high, self._red_lut, self._green_lut,
self._blue_lut, self._alpha_lut)
return rgba
def map_index(self, ary):
""" Maps an array of values to their corresponding color band index.
"""
if self._dirty:
self._recalculate()
indices = (ary - self.range.low) / (self.range.high - self.range.low) * self.steps
return clip(indices.astype(IntType), 0, self.steps - 1)
def reverse_colormap(self):
""" Reverses the color bands of this colormap.
"""
for name in ("red", "green", "blue", "alpha"):
data = asarray(self._segmentdata[name])
data[:, (1,2)] = data[:, (2,1)]
data[:,0] = (1.0 - data[:,0])
self._segmentdata[name] = data[::-1]
self._recalculate()
#------------------------------------------------------------------------
# Private methods
#------------------------------------------------------------------------
def _get_color_bands(self):
""" Gets the color bands array.
"""
if self._dirty:
self._recalculate()
luts = [self._red_lut, self._green_lut, self._blue_lut]
if self.color_depth == 'rgba':
luts.append(self._alpha_lut)
result = zip(*luts)
return result
def _recalculate(self):
""" Recalculates the mapping arrays.
"""
self._red_lut = self._make_mapping_array(
self.steps, self._segmentdata['red']
)
self._green_lut = self._make_mapping_array(
self.steps, self._segmentdata['green']
)
self._blue_lut = self._make_mapping_array(
self.steps, self._segmentdata['blue']
)
self._alpha_lut = self._make_mapping_array(
self.steps, self._segmentdata['alpha']
)
self.updated = True
self._dirty = False
return
#### matplotlib ####
def _make_mapping_array(self, n, data):
"""Creates an N-element 1-D lookup table
The *data* parameter is a list of x,y0,y1 mapping correspondences (which
can be lists or tuples), where all the items are values between 0 and 1,
inclusive. The items in the mapping are:
* x: a value being mapped
* y0: the value of y for values of x less than or equal to the given x value.
* y1: the value of y for values of x greater than the given x value.
The two values of y allow for discontinuous mapping functions (for
example, as might be found in a sawtooth function)
The list must start with x=0, end with x=1, and
all values of x must be in increasing order. Values between
the given mapping points are determined by simple linear interpolation.
The function returns an array "result" where result[x*(N-1)]
gives the closest value for values of x between 0 and 1.
"""
try:
adata = array(data)
"""
This module contains a class `sqrt_lasso`_ that implements
post selection for the square root lasso.
Code based on algorithms described in http://arxiv.org/abs/1504.08031.
"""
from copy import copy
import numpy as np, warnings
from scipy.stats import norm as ndist, chi as chidist
from scipy.interpolate import interp1d
from scipy.stats import t as tdist
from statsmodels.api import OLS
# regreg http://github.com/regreg
import regreg.api as rr
# local
from .lasso import _constraint_from_data
from ..constraints.quasi_affine import (constraints_unknown_sigma,
constraints as quasi_affine,
orthogonal as orthogonal_QA)
from ..constraints.affine import (constraints as affine_constraints,
gibbs_test,
sample_from_sphere)
from ..truncated import find_root
from ..distributions.discrete_multiparameter import multiparameter_family
from ..distributions.discrete_family import discrete_family
from ..sampling.sqrt_lasso import (sample_sqrt_lasso,
sample_sqrt_lasso_segment)
class sqlasso_objective(rr.smooth_atom):
"""
The square-root LASSO objective. Essentially
smooth, but singular on
$\{\beta: y=X\beta\}$.
This singularity is ignored in solving the problem.
It might be a problem sometimes?
"""
_sqrt2 = np.sqrt(2) # often used constant
def __init__(self, X, Y):
self.X = X
self.Y = Y
self._sqerror = rr.squared_error(X, Y)
def smooth_objective(self, x, mode='both', check_feasibility=False):
f, g = self._sqerror.smooth_objective(x, mode='both', check_feasibility=check_feasibility)
f = self._sqrt2 * np.sqrt(f)
if mode == 'both':
return f, g / f
elif mode == 'grad':
return g / f
elif mode == 'func':
return f
else:
raise ValueError("mode incorrectly specified")
def solve_sqrt_lasso(X, Y, weights=None, initial=None, **solve_kwargs):
"""
Solve the square-root LASSO optimization problem:
$$
\text{minimize}_{\beta} \|y-X\beta\|_2 + \|D\beta\|_1,
$$
where $D$ is the diagonal matrix with weights on its diagonal.
Parameters
----------
y : np.float((n,))
The target, in the model $y = X\beta$
X : np.float((n, p))
The data, in the model $y = X\beta$
weights : np.float
Coefficients of the L-1 penalty in
optimization problem, note that different
coordinates can have different coefficients.
initial : np.float(p)
Initial point for optimization.
solve_kwargs : dict
Arguments passed to regreg solver.
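Examples
--------
An illustrative sketch with simulated data (shapes only; any weight
vector of length p works)::

    n, p = 100, 20
    X = np.random.standard_normal((n, p))
    Y = np.random.standard_normal(n)
    soln = solve_sqrt_lasso(X, Y, weights=np.ones(p))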
"""
n, p = X.shape
if n < p:
return solve_sqrt_lasso_skinny(X, Y, weights=weights, initial=initial, **solve_kwargs)
else:
return solve_sqrt_lasso_fat(X, Y, weights=weights, initial=initial, **solve_kwargs)
def solve_sqrt_lasso_fat(X, Y, weights=None, initial=None, **solve_kwargs):
"""
Solve the square-root LASSO optimization problem:
$$
\text{minimize}_{\beta} \|y-X\beta\|_2 + D |\beta|,
$$
where $D$ is the diagonal matrix with weights on its diagonal.
Parameters
----------
y : np.float((n,))
The target, in the model $y = X\beta$
X : np.float((n, p))
The data, in the model $y = X\beta$
weights : np.float
Coefficients of the L-1 penalty in
optimization problem, note that different
coordinates can have different coefficients.
initial : np.float(p)
Initial point for optimization.
solve_kwargs : dict
Arguments passed to regreg solver.
"""
X = rr.astransform(X)
n, p = X.output_shape[0], X.input_shape[0]
if weights is None:
lam = choose_lambda(X)
weights = lam * np.ones((p,))
loss = sqlasso_objective(X, Y)
penalty = rr.weighted_l1norm(weights, lagrange=1.)
problem = rr.simple_problem(loss, penalty)
if initial is not None:
problem.coefs[:] = initial
soln = problem.solve(**solve_kwargs)
return soln
class sqlasso_objective_skinny(rr.smooth_atom):
"""
The square-root LASSO objective on larger parameter space:
.. math::
(\beta, \sigma) \mapsto \frac{\|y-X\beta\|_2^2}{\sigma} + \sigma
"""
def __init__(self, X, Y):
self.X = rr.astransform(X)
n, p = self.X.output_shape[0], self.X.input_shape[0]
self.Y = Y
if n > p:
self._quadratic_term = np.dot(X.T, X)
self._linear_term = -2 * np.dot(X.T, Y)
self._constant_term = (Y**2).sum()
self._sqerror = rr.squared_error(X, Y)
def smooth_objective(self, x, mode='both', check_feasibility=False):
n, p = self.X.output_shape[0], self.X.input_shape[0]
beta, sigma = x[:p], x[p]
if n > p:
if mode in ['grad', 'both']:
g = np.zeros(p+1)
g0 = np.dot(self._quadratic_term, beta)
f1 = self._constant_term + (self._linear_term * beta).sum() + (g0 * beta).sum()
g1 = 2 * g0 + self._linear_term
else:
g1 = np.dot(self._quadratic_term, beta)
f1 = self._constant_term + (self._linear_term * beta).sum() + (g1 * beta).sum()
else:
if mode in ['grad', 'both']:
g = np.zeros(p+1)
f1, g1 = self._sqerror.smooth_objective(beta, 'both')
f1 *= 2; g1 *= 2
else:
f1 = self._sqerror.smooth_objective(beta, 'func')
f1 *= 2
f = f1 / sigma + sigma
if mode == 'both':
g[:p] = g1 / sigma
g[p] = -f1 / sigma**2 + 1.
return f, g
elif mode == 'grad':
g[:p] = g1 / sigma
g[p] = -f1 / sigma**2 + 1.
return g
elif mode == 'func':
return f
else:
raise ValueError("mode incorrectly specified")
def solve_sqrt_lasso_skinny(X, Y, weights=None, initial=None, **solve_kwargs):
"""
Solve the square-root LASSO optimization problem:
$$
\text{minimize}_{\beta} \|y-X\beta\|_2 + \|D\beta\|_1,
$$
where $D$ is the diagonal matrix with weights on its diagonal.
Parameters
----------
y : np.float((n,))
The target, in the model $y = X\beta$
X : np.float((n, p))
The data, in the model $y = X\beta$
weights : np.float
Coefficients of the L-1 penalty in
optimization problem, note that different
coordinates can have different coefficients.
initial : np.float(p)
Initial point for optimization.
solve_kwargs : dict
Arguments passed to regreg solver.
"""
n, p = X.shape
if weights is None:
lam = choose_lambda(X)
weights = lam * np.ones((p,))
weight_dict = dict(zip(np.arange(p),
2 * weights))
penalty = rr.mixed_lasso(range(p) + [rr.NONNEGATIVE], lagrange=1.,
weights=weight_dict)
loss = sqlasso_objective_skinny(X, Y)
problem = rr.simple_problem(loss, penalty)
problem.coefs[-1] = np.linalg.norm(Y)
if initial is not None:
problem.coefs[:-1] = initial
soln = problem.solve(**solve_kwargs)
return soln[:-1]
class sqrt_lasso(object):
r"""
A class for the square-root LASSO for post-selection inference.
The problem solved is
.. math::
\text{minimize}_{\beta} \|y-X\beta\|_2 +
\lambda \|\beta\|_1
where $\lambda$ is `lam` and
.. math::
\lambda_{\max} = \frac{1}{n} \|X^Ty\|_{\infty}
"""
# level for coverage is 1-alpha
alpha = 0.05
UMAU = False
def __init__(self, y, X, weights):
"""
Parameters
----------
y : np.float(y)
The target, in the model $y = X\beta$
X : np.float((n, p))
The data, in the model $y = X\beta$
weights : np.float(p) or float
Coefficients in weighted L-1 penalty in
optimization problem. If a float,
weights are proportional to 1.
"""
n, p = X.shape
self.y = y
self.X = X
n, p = X.shape
if np.array(weights).shape == ():
weights = weights * np.ones(p)
self.weights = weights
def fit(self, **solve_kwargs):
"""
Fit the square root LASSO using `regreg`
using `weights=self.weights.`
Parameters
----------
solve_kwargs : dict
Arguments passed to regreg solver.
Returns
-------
soln : np.float
Solution to lasso with `sklearn_alpha=self.lagrange`.
"""
y, X = self.y, self.X
n, p = self.X.shape
if n < p:
self._soln = solve_sqrt_lasso_skinny(X, y, self.weights, **solve_kwargs)
else:
self._soln = solve_sqrt_lasso_fat(X, y, self.weights, **solve_kwargs)
beta = self._soln
self.active = (beta != 0) # E
nactive = self.active.sum() # |E|
if nactive:
self.z_E = np.sign(beta[self.active]) # z_E
# calculate the "partial correlation" operator R = X_{-E}^T (I - P_E)
X_E = self._X_E = self.X[:,self.active]
X_notE = self.X[:,~self.active]
self._XEinv = np.linalg.pinv(X_E)
self.df_E = n - nactive
self.P_E = np.dot(X_E, self._XEinv)
self.R_E = np.identity(n) - self.P_E
w_E = np.dot(self._XEinv.T, self.weights[self.active] * self.z_E)
sigma_multiplier = np.sqrt(self.df_E / (1 - np.linalg.norm(w_E)**2))
self.sigma_E = np.linalg.norm((y - np.dot(self.P_E, y))) / np.sqrt(self.df_E)
(self._active_constraints,
self._inactive_constraints,
self._constraints) = _constraint_from_data(X_E,
X_notE,
self.z_E,
self.active,
sigma_multiplier * self.sigma_E * self.weights,
self.sigma_E,
np.dot(X_notE.T, self.R_E))
W_E = np.dot(self._XEinv, w_E)
s_E = np.sign(self.z_E * W_E)
self._S_trunc_denominator = denominator = sigma_multiplier * W_E * self.z_E
self.S_trunc_interval = self.compute_sigma_truncation_interval(np.dot(self._XEinv, y))
# HACK to make things more stable?
self.S_trunc_interval[0] = 0
self._quasi_affine_constraints = orthogonal_QA(self._active_constraints.linear_part,
np.zeros(self._active_constraints.linear_part.shape[0]),
self._active_constraints.offset / (self.sigma_E * np.sqrt(self.df_E)),
(self.sigma_E * np.sqrt(self.df_E))**2,
self.df_E)
# for metropolis hastings data carving sampler
self.full_quasi = quasi_affine(self.constraints.linear_part,
np.zeros(self.constraints.linear_part.shape[0]),
self.constraints.offset / (self.sigma_E * np.sqrt(self.df_E)),
self.R_E)
cov = np.identity(n) * self.sigma_hat**2
for con in [self._active_constraints,
self._inactive_constraints,
self._constraints,
self._quasi_affine_constraints]:
con.covariance[:] = cov
else:
self.df_E = self.y.shape[0]
self.sigma_E = np.linalg.norm(y) / np.sqrt(self.df_E)
self.S_trunc_interval = [0, np.inf]
self._active_constraints = self._inactive_constraints = self._constraints = None
self.active = np.nonzero(self.active)[0]
def compute_sigma_truncation_interval(self, coef, raise_if_outside=False):
numerator = coef * self.z_E
denominator = self._S_trunc_denominator
s_E = np.sign(self.z_E * denominator)
'''
Visualization for RGB results.
'''
import sys, os
cur_file_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(cur_file_path, '..'))
import importlib, time, math, shutil, csv, random
import numpy as np
import cv2
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from utils.config import SplitLineParser
from utils.transforms import rotation_matrix_to_angle_axis, batch_rodrigues
from utils.torch import load_state
from utils.logging import mkdir
from fitting.fitting_utils import load_res, prep_res, run_smpl
from fitting.eval_utils import SMPL_SIZES
from body_model.body_model import BodyModel
from body_model.utils import SMPL_PATH, SMPLH_PATH, SMPL_JOINTS, SMPLX_PATH
from viz.utils import viz_smpl_seq, viz_results, create_gif, create_video, create_multi_comparison_images, smpl_connections, imapper_connections, comp_connections
from viz.mesh_viewer import COMPRESS_PARAMS
J_BODY = len(SMPL_JOINTS)-1 # no root
GT_RES_NAME = 'gt_results'
PRED_RES_NAME = 'stage3_results'
PRED_PRIOR_RES_NAME = 'stage3_results_prior'
STAGES_RES_NAMES = ['stage1_results', 'stage2_results', 'stage3_init_results'] # results in camera frame
STAGES_PRIOR_RES_NAMES = ['stage2_results_prior', 'stage3_init_results_prior'] # results in prior frame (w.r.t final floor fit)
FINAL_RES_NAME = 'final_results'
FINAL_PRIOR_RES_NAME = 'final_results_prior'
OBS_NAME = 'observations'
FPS = 30
# visualization options
GROUND_ALPHA = 1.0
BODY_ALPHA = None # use to make body mesh translucent
IM_EXTN = 'jpg' # png # to use for rendering jpg saves a lot of space
def parse_args(argv):
parser = SplitLineParser(fromfile_prefix_chars='@', allow_abbrev=False)
parser.add_argument('--results', type=str, required=True, help='Path to the results_out directory from fitting to run viz on.')
parser.add_argument('--out', type=str, required=True, help='Path to save visualizations to.')
# visualization options
parser.add_argument('--viz-final-only', dest='viz_final_only', action='store_true', help="If given only visualize the final full sequence result and not the subsequences.")
parser.set_defaults(viz_final_only=False)
parser.add_argument('--viz-stages', dest='viz_stages', action='store_true', help="If given, visualizes intermediate optimization stages and comparison to final pred.")
parser.set_defaults(viz_stages=False)
parser.add_argument('--viz-prior-frame', dest='viz_prior_frame', action='store_true', help="If given, also visualizes results in the HuMoR canonical coordinate frame.")
parser.set_defaults(viz_prior_frame=False)
parser.add_argument('--viz-obs-2d', dest='viz_obs_2d', action='store_true', help="If given, visualizes 2D joint observations on top of og video")
parser.set_defaults(viz_obs_2d=False)
parser.add_argument('--viz-no-render-cam-body', dest='viz_render_cam_body', action='store_false', help="If given, does not render body mesh from camera view")
parser.set_defaults(viz_render_cam_body=True)
parser.add_argument('--viz-pred-floor', dest='viz_pred_floor', action='store_true', help="Render the predicted floor from the camera view.")
parser.set_defaults(viz_pred_floor=False)
parser.add_argument('--viz-contacts', dest='viz_contacts', action='store_true', help="Render predicted contacts on the joints")
parser.set_defaults(viz_contacts=False)
parser.add_argument('--viz-wireframe', dest='viz_wireframe', action='store_true', help="Render body and floor in wireframe")
parser.set_defaults(viz_wireframe=False)
parser.add_argument('--viz-bodies-static', type=int, default=None, help="If given, renders all body predictions at once at this given frame interval interval.")
parser.add_argument('--viz-no-bg', dest='viz_bg', action='store_false', help="If given will not overlay the rendering on top of OG video.")
parser.set_defaults(viz_bg=True)
parser.add_argument('--viz-render-width', type=int, default=1280, help="Width of rendered output images")
parser.add_argument('--viz-render-height', type=int, default=720, help="Height of rendered output images")
parser.add_argument('--shuffle', dest='shuffle', action='store_true', help="Shuffles viz ordering")
parser.set_defaults(shuffle=False)
parser.add_argument('--flip-img', dest='flip_img', action='store_true', help="Flips the loaded image about y-axis. This is useful for PROX result.")
parser.set_defaults(flip_img=False)
known_args, unknown_args = parser.parse_known_args(argv)
return known_args
def main(args):
print(args)
mkdir(args.out)
qual_out_path = args.out
D_IMW, D_IMH = args.viz_render_width, args.viz_render_height
device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
# collect our results directories
all_result_dirs = [os.path.join(args.results, f) for f in sorted(os.listdir(args.results)) if f[0] != '.']
all_result_dirs = [f for f in all_result_dirs if os.path.isdir(f)]
if args.shuffle:
random.seed(0)
random.shuffle(all_result_dirs)
print(all_result_dirs)
seq_name_list = []
body_model_dict = dict()
for residx, result_dir in enumerate(all_result_dirs):
seq_name = result_dir.split('/')[-1]
is_final_res = seq_name == 'final_results'
if not is_final_res:
if args.viz_final_only:
continue
seq_name = '_'.join(result_dir.split('/')[-1].split('_')[:-1])
print('Visualizing %s %d / %d...' % (seq_name, residx, len(all_result_dirs)))
obs_dict = load_res(result_dir, OBS_NAME + '.npz')
cur_img_paths = obs_dict['img_paths'] # used to load in results from baselines
cur_frame_names = ['.'.join(f.split('/')[-1].split('.')[:-1]) for f in cur_img_paths]
# load in humor prediction
pred_res = load_res(result_dir, PRED_RES_NAME + '.npz')
if pred_res is None:
print('Could not find final pred (stage 3) results for %s, skipping...' % (seq_name))
continue
T = pred_res['trans'].shape[0]
# check the predicted SMPL parameters for NaNs and fill invalid entries
for smpk in SMPL_SIZES.keys():
cur_valid = (torch.sum(torch.logical_not(torch.isfinite(torch.Tensor(pred_res[smpk])))).item() == 0)
if not cur_valid:
print('Found NaNs in prediction for %s, filling with zeros...' % (smpk))
# print(pred_res[smpk].shape)
if smpk == 'betas':
pred_res[smpk] = np.zeros((pred_res[smpk].shape[0]), dtype=np.float)
else:
pred_res[smpk] = np.zeros((T, pred_res[smpk].shape[1]), dtype=np.float)
floor_valid = (torch.sum(torch.logical_not(torch.isfinite(torch.Tensor(pred_res['floor_plane'])))).item() == 0)
if not floor_valid:
print('Predicted floor is NaN, replacing with up.')
pred_res['floor_plane'] = np.array([0.0, -1.0, 0.0, 0.0])
pred_res = prep_res(pred_res, device, T)
num_pred_betas = pred_res['betas'].size(1)
pred_floor_plane = torch.Tensor(pred_res['floor_plane']).to(device)
# humor prediction in prior frame
pred_res_prior = None
if args.viz_prior_frame:
pred_res_prior = load_res(result_dir, PRED_PRIOR_RES_NAME + '.npz')
if pred_res_prior is None:
print('Could not find final prior pred (stage 3) results for %s, skipping...' % (seq_name))
continue
pred_res_prior = prep_res(pred_res_prior, device, T)
# load stages results if needed
cur_viz_stages = args.viz_stages and not is_final_res
cur_stages_res = None
if cur_viz_stages:
cur_stages_res = dict()
for stage_name in STAGES_RES_NAMES:
stage_res = load_res(result_dir, stage_name + '.npz')
if stage_res is None:
print('Could not find results for stage %s of %s, skipping...' % (stage_name, seq_name))
continue
cur_stages_res[stage_name] = prep_res(stage_res, device, T)
# load prior stages results if needed
cur_stages_prior_res = None
if args.viz_prior_frame and cur_viz_stages:
cur_stages_prior_res = dict()
for stage_name in STAGES_PRIOR_RES_NAMES:
stage_res = load_res(result_dir, stage_name + '.npz')
if stage_res is None:
print('Could not find results for stage %s of %s, skipping...' % (stage_name, seq_name))
continue
cur_stages_prior_res[stage_name] = prep_res(stage_res, device, T)
#
# create body models for each
#
meta_path = os.path.join(result_dir, 'meta.txt')
if not os.path.exists(meta_path):
print('Could not find metadata for %s, skipping...' % (seq_name))
continue
optim_bm_path = gt_bm_path = None
with open(meta_path, 'r') as f:
optim_bm_str = f.readline().strip()
optim_bm_path = optim_bm_str.split(' ')[1]
gt_bm_str = f.readline().strip()
gt_bm_path = gt_bm_str.split(' ')[1]
# humor model
pred_bm = None
if optim_bm_path not in body_model_dict:
pred_bm = BodyModel(bm_path=optim_bm_path,
num_betas=num_pred_betas,
batch_size=T).to(device)
if not is_final_res:
# final results will be different length, so want to re-load for subsequences
body_model_dict[optim_bm_path] = pred_bm
if not is_final_res:
pred_bm = body_model_dict[optim_bm_path]
# we are using this sequence for sure
seq_name_list.append(seq_name)
# run through SMPL
pred_body = run_smpl(pred_res, pred_bm)
stages_body = None
if cur_stages_res is not None:
stages_body = dict()
for k, v in cur_stages_res.items():
stages_body[k] = run_smpl(v, pred_bm)
# get body smpl joints
stage_body_joints = stages_body[k].Jtr[:, :len(SMPL_JOINTS)]
cur_stages_res[k]['joints3d_smpl'] = stage_body_joints
# prior frame through SMPL
pred_prior_body = None
if pred_res_prior is not None:
pred_prior_body = run_smpl(pred_res_prior, pred_bm)
stages_prior_body = None
if cur_stages_prior_res is not None:
stages_prior_body = dict()
for k, v in cur_stages_prior_res.items():
stages_prior_body[k] = run_smpl(v, pred_bm)
# load in image frames
IMW, IMH = None, None
img_arr = np.zeros((T, D_IMH, D_IMW, 3), dtype=np.float32)
import os
import pandas as pd
import numpy as np
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
df_all = pd.read_csv("result_all.csv")
df_kanto = pd.read_csv("result_kanto.csv")
df_kanto_groupby = df_kanto.groupby("station_id", as_index=False).mean()
df_timeserias_all = pd.read_csv("result_timeserias_all.csv")
# Or using s3 bucket.
# df = pd.read_csv("s3://your-bucket/result.csv")
MAPBOX_ACCESS_TOKEN = os.getenv("MAPBOX_ACCESS_TOKEN", "YOUR TOKEN")
# FIXME: input your mapbox token
# https://docs.mapbox.com/help/how-mapbox-works/access-tokens/
PORT = os.getenv("PORT", "8080")
app = dash.Dash()
application = app.server
app.css.append_css({
"external_url": "https://cdn.rawgit.com/plotly/dash-app-stylesheets/"
"2d266c578d2a6e8850ebce48fdb52759b2aef506/stylesheet-oil-and-gas.css"})
app.layout = html.Div(children=[
html.H1(children="温度マップ"),
html.H2(children="全国の温度マップ"),
html.Div([
html.Div([
dcc.Graph(id="temp-map",)
], className="eight columns"),
html.Div([
html.Button("平均気温", id="btn-avg", n_clicks_timestamp="0"),
html.Button("最高気温", id="btn-high", n_clicks_timestamp="0"),
html.Button("最低気温", id="btn-low", n_clicks_timestamp="0"),
html.Div(id="container-button-temp-select")
], className="four columns"),
html.Div([
dcc.Graph(id="low-and-high",
figure={
"data": [
go.Scatter(
x=df_all[df_all["prefecture"] == i]["low_temperature"],
y=df_all[df_all["prefecture"] == i]["high_temperature"],
text=df_all[df_all["prefecture"] == i]["prefecture"],
mode="markers",
opacity=0.7,
marker={
"size": 15,
"line": {"width": 0.5, "color": "white"}
},
name=i
)for i in df_all["prefecture"].unique()
],
"layout": go.Layout(
xaxis={"title": "最低気温"},
yaxis={"title": "最高気温"},
margin={"l": 50, "b": 50, "t": 10, "r": 10},
showlegend=False,
hovermode="closest")
})
], className="four columns")
], className="row"),
html.H2(children="関東の温度マップ"),
html.Div([
html.Div([
dcc.Graph(
id="temp-kanto-map",
figure={
"data": [
go.Scattermapbox(
lat=df_kanto_groupby[df_kanto_groupby["station_id"] == i]["latitude"],
lon=df_kanto_groupby[df_kanto_groupby["station_id"] == i]["longitude"],
mode="markers",
customdata=df_kanto_groupby[df_kanto_groupby["station_id"] == i]["station_id"],
marker=dict(
symbol="circle",
size=16,
opacity=0.8,
colorscale="RdBu",
cmin=df_kanto_groupby["avg_temperature"].min(),
color=df_kanto_groupby[df_kanto_groupby["station_id"] == i]["avg_temperature"],
cmax=df_kanto_groupby["avg_temperature"].max(),
),
text=df_kanto_groupby[df_kanto_groupby["station_id"] == i]["avg_temperature"],
) for i in df_kanto_groupby["station_id"].unique()
],
"layout":
go.Layout(
autosize=True,
showlegend=False,
hovermode="closest",
mapbox=dict(
accesstoken=MAPBOX_ACCESS_TOKEN,
bearing=0,
center=dict(
lat=np.mean(df_kanto_groupby["latitude"]),
lon=np.mean(df_kanto_groupby["longitude"])
),
pitch=100,
zoom=8,
),
height=600
)
}
)
], className="six columns"),
html.Div([
dcc.Graph(id="temp-timesearias")
], className="six columns",)
], className="row"),
])
@app.callback(dash.dependencies.Output("temp-map", "figure"),
[dash.dependencies.Input("btn-avg", "n_clicks_timestamp"),
dash.dependencies.Input('btn-high', "n_clicks_timestamp"),
dash.dependencies.Input("btn-low", "n_clicks_timestamp")])
def update_temp_map(btn1, btn2, btn3):
if int(btn1) > int(btn2) and int(btn1) > int(btn3):
index = "avg_temperature"
elif int(btn2) > int(btn1) and int(btn2) > int(btn3):
index = "high_temperature"
elif int(btn3) > int(btn1) and int(btn3) > int(btn2):
index = "low_temperature"
else:
index = "avg_temperature"
return {
"data": [
go.Scattermapbox(
lat=df_all[df_all["prefecture"] == i]["latitude"],
lon=df_all[df_all["prefecture"] == i]["longitude"],
mode="markers",
marker=dict(
symbol="circle",
size=20,
opacity=0.8,
colorscale="RdBu",
cmin=df_all[index].min(),
color=df_all[df_all["prefecture"] == i][index],
cmax=df_all[index].max(),
),
text=df_all[df_all["prefecture"] == i][index],
name=str(df_all[df_all["prefecture"] == i]["prefecture"].values),
) for i in df_all["prefecture"].unique()
],
"layout":
go.Layout(
autosize=True,
showlegend=False,
hovermode="closest",
mapbox=dict(
accesstoken=MAPBOX_ACCESS_TOKEN,
bearing=0,
center=dict(
lat=np.mean(df_all["latitude"]),
                    lon=np.mean(df_all["longitude"])
import inspect
import albumentations
import mmcv
import numpy as np
from albumentations import Compose
from imagecorruptions import corrupt
from numpy import random
from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps
from ..registry import PIPELINES
@PIPELINES.register_module
class Resize(object):
"""Resize images & bbox & mask.
This transform resizes the input image to some scale. Bboxes and masks are
then resized with the same scale factor. If the input dict contains the key
"scale", then the scale in the input dict is used, otherwise the specified
scale in the init method is used.
`img_scale` can either be a tuple (single-scale) or a list of tuple
(multi-scale). There are 3 multiscale modes:
- `ratio_range` is not None: randomly sample a ratio from the ratio range
and multiply it with the image scale.
- `ratio_range` is None and `multiscale_mode` == "range": randomly sample a
scale from the a range.
- `ratio_range` is None and `multiscale_mode` == "value": randomly sample a
scale from multiple scales.
Args:
img_scale (tuple or list[tuple]): Images scales for resizing.
multiscale_mode (str): Either "range" or "value".
ratio_range (tuple[float]): (min_ratio, max_ratio)
keep_ratio (bool): Whether to keep the aspect ratio when resizing the
image.
"""
def __init__(self,
img_scale=None,
multiscale_mode='range',
ratio_range=None,
keep_ratio=True):
if img_scale is None:
self.img_scale = None
else:
if isinstance(img_scale, list):
self.img_scale = img_scale
else:
self.img_scale = [img_scale]
assert mmcv.is_list_of(self.img_scale, tuple)
if ratio_range is not None:
# mode 1: given a scale and a range of image ratio
assert len(self.img_scale) == 1
else:
# mode 2: given multiple scales or a range of scales
assert multiscale_mode in ['value', 'range']
self.multiscale_mode = multiscale_mode
self.ratio_range = ratio_range
self.keep_ratio = keep_ratio
@staticmethod
def random_select(img_scales):
assert mmcv.is_list_of(img_scales, tuple)
scale_idx = np.random.randint(len(img_scales))
img_scale = img_scales[scale_idx]
return img_scale, scale_idx
@staticmethod
def random_sample(img_scales):
assert mmcv.is_list_of(img_scales, tuple) and len(img_scales) == 2
img_scale_long = [max(s) for s in img_scales]
img_scale_short = [min(s) for s in img_scales]
long_edge = np.random.randint(
min(img_scale_long),
max(img_scale_long) + 1)
short_edge = np.random.randint(
min(img_scale_short),
max(img_scale_short) + 1)
img_scale = (long_edge, short_edge)
return img_scale, None
@staticmethod
def random_sample_ratio(img_scale, ratio_range):
assert isinstance(img_scale, tuple) and len(img_scale) == 2
min_ratio, max_ratio = ratio_range
assert min_ratio <= max_ratio
ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio
scale = int(img_scale[0] * ratio), int(img_scale[1] * ratio)
return scale, None
def _random_scale(self, results):
if self.ratio_range is not None:
scale, scale_idx = self.random_sample_ratio(
self.img_scale[0], self.ratio_range)
elif len(self.img_scale) == 1:
scale, scale_idx = self.img_scale[0], 0
elif self.multiscale_mode == 'range':
scale, scale_idx = self.random_sample(self.img_scale)
elif self.multiscale_mode == 'value':
scale, scale_idx = self.random_select(self.img_scale)
else:
raise NotImplementedError
results['scale'] = scale
results['scale_idx'] = scale_idx
def _resize_img(self, results):
if self.keep_ratio:
img, scale_factor = mmcv.imrescale(
results['img'], results['scale'], return_scale=True)
else:
img, w_scale, h_scale = mmcv.imresize(
results['img'], results['scale'], return_scale=True)
scale_factor = np.array([w_scale, h_scale, w_scale, h_scale],
dtype=np.float32)
results['img'] = img
results['img_shape'] = img.shape
results['pad_shape'] = img.shape # in case that there is no padding
results['scale_factor'] = scale_factor
results['keep_ratio'] = self.keep_ratio
def _resize_bboxes(self, results):
img_shape = results['img_shape']
for key in results.get('bbox_fields', []):
bboxes = results[key] * results['scale_factor']
bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1] - 1)
bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0] - 1)
results[key] = bboxes
def _resize_masks(self, results):
for key in results.get('mask_fields', []):
if results[key] is None:
continue
if self.keep_ratio:
masks = [
mmcv.imrescale(
mask, results['scale_factor'], interpolation='nearest')
for mask in results[key]
]
else:
mask_size = (results['img_shape'][1], results['img_shape'][0])
masks = [
mmcv.imresize(mask, mask_size, interpolation='nearest')
for mask in results[key]
]
results[key] = masks
def _resize_landmarks(self, results):
key = "gt_landmarks"
if key in results:
img_shape = results['img_shape']
w_scale = results['scale_factor'][0]
h_scale = results['scale_factor'][1]
scale_factor = np.array([w_scale, h_scale] * 5, dtype=np.float32)
ldms = results[key] * scale_factor
ldms[:, 0::2] = np.clip(ldms[:, 0::2], 0, img_shape[1] - 1)
ldms[:, 1::2] = np.clip(ldms[:, 1::2], 0, img_shape[0] - 1)
results[key] = ldms
def __call__(self, results):
if 'scale' not in results:
self._random_scale(results)
self._resize_img(results)
self._resize_bboxes(results)
self._resize_masks(results)
self._resize_landmarks(results)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += ('(img_scale={}, multiscale_mode={}, ratio_range={}, '
'keep_ratio={})').format(self.img_scale,
self.multiscale_mode,
self.ratio_range,
self.keep_ratio)
return repr_str
@PIPELINES.register_module
class RandomFlip(object):
"""Flip the image & bbox & mask.
If the input dict contains the key "flip", then the flag will be used,
otherwise it will be randomly decided by a ratio specified in the init
method.
Args:
flip_ratio (float, optional): The flipping probability.
"""
def __init__(self, flip_ratio=None):
self.flip_ratio = flip_ratio
if flip_ratio is not None:
assert flip_ratio >= 0 and flip_ratio <= 1
def bbox_flip(self, bboxes, img_shape):
"""Flip bboxes horizontally.
Args:
bboxes(ndarray): shape (..., 4*k)
img_shape(tuple): (height, width)
"""
assert bboxes.shape[-1] % 4 == 0
w = img_shape[1]
flipped = bboxes.copy()
flipped[..., 0::4] = w - bboxes[..., 2::4] - 1
flipped[..., 2::4] = w - bboxes[..., 0::4] - 1
return flipped
def __call__(self, results):
if 'flip' not in results:
flip = True if np.random.rand() < self.flip_ratio else False
results['flip'] = flip
if results['flip']:
# flip image
results['img'] = mmcv.imflip(results['img'])
# flip bboxes
for key in results.get('bbox_fields', []):
results[key] = self.bbox_flip(results[key],
results['img_shape'])
# flip masks
for key in results.get('mask_fields', []):
results[key] = [mask[:, ::-1] for mask in results[key]]
return results
def __repr__(self):
return self.__class__.__name__ + '(flip_ratio={})'.format(
self.flip_ratio)
@PIPELINES.register_module
class Pad(object):
"""Pad the image & mask.
There are two padding modes: (1) pad to a fixed size and (2) pad to the
minimum size that is divisible by some number.
Args:
size (tuple, optional): Fixed padding size.
size_divisor (int, optional): The divisor of padded size.
pad_val (float, optional): Padding value, 0 by default.
"""
def __init__(self, size=None, size_divisor=None, pad_val=0):
self.size = size
self.size_divisor = size_divisor
self.pad_val = pad_val
# only one of size and size_divisor should be valid
assert size is not None or size_divisor is not None
assert size is None or size_divisor is None
def _pad_img(self, results):
if self.size is not None:
padded_img = mmcv.impad(results['img'], self.size)
elif self.size_divisor is not None:
padded_img = mmcv.impad_to_multiple(
results['img'], self.size_divisor, pad_val=self.pad_val)
results['img'] = padded_img
results['pad_shape'] = padded_img.shape
results['pad_fixed_size'] = self.size
results['pad_size_divisor'] = self.size_divisor
def _pad_masks(self, results):
pad_shape = results['pad_shape'][:2]
for key in results.get('mask_fields', []):
padded_masks = [
mmcv.impad(mask, pad_shape, pad_val=self.pad_val)
for mask in results[key]
]
results[key] = np.stack(padded_masks, axis=0)
def __call__(self, results):
self._pad_img(results)
self._pad_masks(results)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += '(size={}, size_divisor={}, pad_val={})'.format(
self.size, self.size_divisor, self.pad_val)
return repr_str
@PIPELINES.register_module
class Normalize(object):
"""Normalize the image.
Args:
mean (sequence): Mean values of 3 channels.
std (sequence): Std values of 3 channels.
to_rgb (bool): Whether to convert the image from BGR to RGB,
default is true.
"""
def __init__(self, mean, std, to_rgb=True):
self.mean = np.array(mean, dtype=np.float32)
self.std = np.array(std, dtype=np.float32)
self.to_rgb = to_rgb
def __call__(self, results):
results['img'] = mmcv.imnormalize(results['img'], self.mean, self.std,
self.to_rgb)
results['img_norm_cfg'] = dict(
mean=self.mean, std=self.std, to_rgb=self.to_rgb)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += '(mean={}, std={}, to_rgb={})'.format(
self.mean, self.std, self.to_rgb)
return repr_str
@PIPELINES.register_module
class RandomCrop(object):
"""Random crop the image & bboxes & masks.
Args:
crop_size (tuple): Expected size after cropping, (h, w).
"""
def __init__(self, crop_size):
self.crop_size = crop_size
def __call__(self, results):
img = results['img']
margin_h = max(img.shape[0] - self.crop_size[0], 0)
margin_w = max(img.shape[1] - self.crop_size[1], 0)
offset_h = np.random.randint(0, margin_h + 1)
offset_w = np.random.randint(0, margin_w + 1)
crop_y1, crop_y2 = offset_h, offset_h + self.crop_size[0]
crop_x1, crop_x2 = offset_w, offset_w + self.crop_size[1]
# crop the image
img = img[crop_y1:crop_y2, crop_x1:crop_x2, :]
img_shape = img.shape
results['img'] = img
results['img_shape'] = img_shape
# crop bboxes accordingly and clip to the image boundary
for key in results.get('bbox_fields', []):
bbox_offset = np.array([offset_w, offset_h, offset_w, offset_h],
dtype=np.float32)
bboxes = results[key] - bbox_offset
bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1] - 1)
bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0] - 1)
results[key] = bboxes
# filter out the gt bboxes that are completely cropped
if 'gt_bboxes' in results:
gt_bboxes = results['gt_bboxes']
valid_inds = (gt_bboxes[:, 2] > gt_bboxes[:, 0]) & (
gt_bboxes[:, 3] > gt_bboxes[:, 1])
# if no gt bbox remains after cropping, just skip this image
if not np.any(valid_inds):
return None
results['gt_bboxes'] = gt_bboxes[valid_inds, :]
if 'gt_labels' in results:
results['gt_labels'] = results['gt_labels'][valid_inds]
# filter and crop the masks
if 'gt_masks' in results:
valid_gt_masks = []
for i in np.where(valid_inds)[0]:
gt_mask = results['gt_masks'][i][crop_y1:crop_y2, crop_x1:
crop_x2]
valid_gt_masks.append(gt_mask)
results['gt_masks'] = valid_gt_masks
return results
def __repr__(self):
return self.__class__.__name__ + '(crop_size={})'.format(
self.crop_size)
@PIPELINES.register_module
class SegResizeFlipPadRescale(object):
"""A sequential transforms to semantic segmentation maps.
The same pipeline as input images is applied to the semantic segmentation
map, and finally rescale it by some scale factor. The transforms include:
1. resize
2. flip
3. pad
4. rescale (so that the final size can be different from the image size)
Args:
scale_factor (float): The scale factor of the final output.
"""
def __init__(self, scale_factor=1):
self.scale_factor = scale_factor
def __call__(self, results):
if results['keep_ratio']:
gt_seg = mmcv.imrescale(
results['gt_semantic_seg'],
results['scale'],
interpolation='nearest')
else:
gt_seg = mmcv.imresize(
results['gt_semantic_seg'],
results['scale'],
interpolation='nearest')
if results['flip']:
gt_seg = mmcv.imflip(gt_seg)
if gt_seg.shape != results['pad_shape']:
gt_seg = mmcv.impad(gt_seg, results['pad_shape'][:2])
if self.scale_factor != 1:
gt_seg = mmcv.imrescale(
gt_seg, self.scale_factor, interpolation='nearest')
results['gt_semantic_seg'] = gt_seg
return results
def __repr__(self):
return self.__class__.__name__ + '(scale_factor={})'.format(
self.scale_factor)
@PIPELINES.register_module
class PhotoMetricDistortion(object):
"""Apply photometric distortion to image sequentially, every transformation
is applied with a probability of 0.5. The position of random contrast is in
second or second to last.
1. random brightness
2. random contrast (mode 0)
3. convert color from BGR to HSV
4. random saturation
5. random hue
6. convert color from HSV to BGR
7. random contrast (mode 1)
8. randomly swap channels
Args:
brightness_delta (int): delta of brightness.
contrast_range (tuple): range of contrast.
saturation_range (tuple): range of saturation.
hue_delta (int): delta of hue.
"""
def __init__(self,
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18):
self.brightness_delta = brightness_delta
self.contrast_lower, self.contrast_upper = contrast_range
self.saturation_lower, self.saturation_upper = saturation_range
self.hue_delta = hue_delta
def __call__(self, results):
img = results['img']
# random brightness
if random.randint(2):
delta = random.uniform(-self.brightness_delta,
self.brightness_delta)
img += delta
# mode == 0 --> do random contrast first
# mode == 1 --> do random contrast last
mode = random.randint(2)
if mode == 1:
if random.randint(2):
alpha = random.uniform(self.contrast_lower,
self.contrast_upper)
img *= alpha
# convert color from BGR to HSV
img = mmcv.bgr2hsv(img)
# random saturation
if random.randint(2):
img[..., 1] *= random.uniform(self.saturation_lower,
self.saturation_upper)
# random hue
if random.randint(2):
img[..., 0] += random.uniform(-self.hue_delta, self.hue_delta)
img[..., 0][img[..., 0] > 360] -= 360
img[..., 0][img[..., 0] < 0] += 360
# convert color from HSV to BGR
img = mmcv.hsv2bgr(img)
# random contrast
if mode == 0:
if random.randint(2):
alpha = random.uniform(self.contrast_lower,
self.contrast_upper)
img *= alpha
# randomly swap channels
if random.randint(2):
img = img[..., random.permutation(3)]
results['img'] = img
return results
def __repr__(self):
repr_str = self.__class__.__name__
        repr_str += ('(brightness_delta={}, contrast_range=({}, {}), '
                     'saturation_range=({}, {}), hue_delta={})').format(
                         self.brightness_delta, self.contrast_lower,
                         self.contrast_upper, self.saturation_lower,
                         self.saturation_upper, self.hue_delta)
return repr_str
@PIPELINES.register_module
class Expand(object):
"""Random expand the image & bboxes.
Randomly place the original image on a canvas of 'ratio' x original image
size filled with mean values. The ratio is in the range of ratio_range.
Args:
mean (tuple): mean value of dataset.
to_rgb (bool): if need to convert the order of mean to align with RGB.
ratio_range (tuple): range of expand ratio.
"""
def __init__(self, mean=(0, 0, 0), to_rgb=True, ratio_range=(1, 4)):
if to_rgb:
self.mean = mean[::-1]
else:
self.mean = mean
self.min_ratio, self.max_ratio = ratio_range
def __call__(self, results):
if random.randint(2):
return results
img, boxes = [results[k] for k in ('img', 'gt_bboxes')]
h, w, c = img.shape
ratio = random.uniform(self.min_ratio, self.max_ratio)
expand_img = np.full((int(h * ratio), int(w * ratio), c),
self.mean).astype(img.dtype)
left = int(random.uniform(0, w * ratio - w))
top = int(random.uniform(0, h * ratio - h))
expand_img[top:top + h, left:left + w] = img
boxes = boxes + np.tile((left, top), 2).astype(boxes.dtype)
results['img'] = expand_img
results['gt_bboxes'] = boxes
if 'gt_masks' in results:
expand_gt_masks = []
for mask in results['gt_masks']:
expand_mask = np.full((int(h * ratio), int(w * ratio)),
0).astype(mask.dtype)
expand_mask[top:top + h, left:left + w] = mask
expand_gt_masks.append(expand_mask)
results['gt_masks'] = expand_gt_masks
return results
def __repr__(self):
repr_str = self.__class__.__name__
        repr_str += '(mean={}, ratio_range=({}, {}))'.format(
            self.mean, self.min_ratio, self.max_ratio)
return repr_str
@PIPELINES.register_module
class MinIoURandomCrop(object):
"""Random crop the image & bboxes, the cropped patches have minimum IoU
requirement with original image & bboxes, the IoU threshold is randomly
selected from min_ious.
Args:
min_ious (tuple): minimum IoU threshold
        min_crop_size (float): minimum ratio of the cropped patch's size to the
            original image size.
"""
def __init__(self, min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3):
# 1: return ori img
self.sample_mode = (1, *min_ious, 0)
self.min_crop_size = min_crop_size
def __call__(self, results):
img, boxes, labels = [
results[k] for k in ('img', 'gt_bboxes', 'gt_labels')
]
h, w, c = img.shape
while True:
mode = random.choice(self.sample_mode)
if mode == 1:
return results
min_iou = mode
for i in range(50):
new_w = random.uniform(self.min_crop_size * w, w)
new_h = random.uniform(self.min_crop_size * h, h)
# h / w in [0.5, 2]
if new_h / new_w < 0.5 or new_h / new_w > 2:
continue
left = random.uniform(w - new_w)
top = random.uniform(h - new_h)
patch = np.array(
(int(left), int(top), int(left + new_w), int(top + new_h)))
overlaps = bbox_overlaps(
patch.reshape(-1, 4), boxes.reshape(-1, 4)).reshape(-1)
if overlaps.min() < min_iou:
continue
# center of boxes should inside the crop img
center = (boxes[:, :2] + boxes[:, 2:]) / 2
mask = ((center[:, 0] > patch[0]) * (center[:, 1] > patch[1]) *
(center[:, 0] < patch[2]) * (center[:, 1] < patch[3]))
if not mask.any():
continue
boxes = boxes[mask]
labels = labels[mask]
# adjust boxes
img = img[patch[1]:patch[3], patch[0]:patch[2]]
boxes[:, 2:] = boxes[:, 2:].clip(max=patch[2:])
boxes[:, :2] = boxes[:, :2].clip(min=patch[:2])
                boxes -= np.tile(patch[:2], 2)
# -*- coding: utf-8 -*-
from screws.freeze.main import FrozenOnly
import numpy as np
class ___3dCSCG_1Form_Vortex_Detection___(FrozenOnly):
"""A wrapper of all vortex detection methods.
So, we consider this 1 form as a variable of a flow field.
"""
def __init__(self, _1sf):
self._sf_ = _1sf
self._freeze_self_()
def ___PRIVATE_generate_gradient_tensor_at___(self, xi, eta, sigma):
"""We compute the gradient tensor of this 1form.
To do so, we first project this 1-form into a vector of 3 standard 0-forms which represent
the three components. Then we do the gradient (apply the incidence matrix E10) to each
standard 0-form.
It returns a 3 by 3 tensor representing
((du_dx, du_dy, du_dz),
(dv_dx, dv_dy, dv_dz),
(dw_dx, dw_dy, dw_dz)).
Each value are 3d evaluated at *meshgrid(xi, eta, sigma, indexing='ij)
:param xi: 1d increasing array in [-1,1].
:param eta: 1d increasing array in [-1,1].
:param sigma: 1d increasing array in [-1,1].
"""
        assert np.ndim(xi) == 1 and np.all(np.diff(xi) > 0)
# external imports
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import block_diag
# internal inputs
from pympc.dynamics.discrete_time_systems import AffineSystem, PieceWiseAffineSystem
from pympc.optimization.parametric_programs import MultiParametricQuadraticProgram, MultiParametricMixedIntegerQuadraticProgram
from pympc.optimization.programs import linear_program
class ModelPredictiveController(object):
"""
Model predictive controller for linear systems, it solves the optimal control problem
    V*(x(0)) := min_{x(.), u(.)} 1/2 sum_{t=0}^{N-1} x'(t) Q x(t) + u'(t) R u(t) + 1/2 x'(N) P x(N)
s.t. x(t+1) = A x(t) + B u(t), t=0, ..., N-1,
(x(t), u(t)) in D, t=0, ..., N-1,
x(N) in X_N,
    in order to get the optimal control sequence (u(0), ..., u(N-1)).
"""
def __init__(self, S, N, Q, R, P, D, X_N):
"""
        Initializes the controller.
Arguments
----------
        S : instance of LinearSystem
Linear system to be controlled.
N : int
Horizon of the optimal control problem.
Q : numpy.ndarray
Quadratic cost for the state.
R : numpy.ndarray
Quadratic cost for the input.
P : numpy.ndarray
Quadratic cost for the terminal state.
D : instance of Polyhedron
Stage constraint for state and inputs.
X_N : instance of Polyhedron
Terminal set.
"""
# store inputs
self.S = S
self.N = N
self.Q = Q
self.R = R
self.P = P
self.D = D
self.X_N = X_N
        # initialize explicit solution
self.explicit_solution = None
# condense mpqp
self.mpqp = self._condense_program()
def _condense_program(self):
"""
Generates and stores the optimal control problem in condensed form.
Returns
----------
instance of MultiParametricQuadraticProgram
Condensed mpQP.
"""
# create fake PWA system and use PWA condenser
c = np.zeros((self.S.nx, 1))
S = AffineSystem(self.S.A, self.S.B, c)
S = PieceWiseAffineSystem([S], [self.D])
mode_sequence = [0]*self.N
return condense_optimal_control_problem(S, self.Q, self.R, self.P, self.X_N, mode_sequence)
def feedforward(self, x):
"""
Given the state x of the system, returns the optimal sequence of N inputs and the related cost.
Arguments
----------
x : numpy.ndarray
State of the system.
Returns
----------
u_feedforward : list of numpy.ndarray
Optimal control signals for t = 0, ..., N-1.
V : float
Optimal value function for the given state.
"""
# solve and check feasibility
sol = self.mpqp.solve(x)
if sol['min'] is None:
return None, None
# from vector to list of vectors
u_feedforward = [sol['argmin'][self.S.nu*i : self.S.nu*(i+1), :] for i in range(self.N)]
V = sol['min']
return u_feedforward, V
def feedback(self, x):
"""
Returns the optimal feedback for the given state x.
Arguments
----------
x : numpy.ndarray
State of the system.
Returns
----------
u_feedback : numpy.ndarray
Optimal feedback.
"""
# get feedforward and extract first input
u_feedforward = self.feedforward(x)[0]
if u_feedforward is None:
return None
return u_feedforward[0]
def store_explicit_solution(self, **kwargs):
"""
Solves the mpqp (condensed optimal control problem) explicitly.
Returns
----------
instance of ExplicitSolution
Explicit solution of the underlying mpqp problem.
"""
self.explicit_solution = self.mpqp.explicit_solve(**kwargs)
def feedforward_explicit(self, x):
"""
Finds the critical region where the state x is and returns the optimal feedforward and the cost to go.
Arguments
----------
x : numpy.ndarray
State of the system.
Returns
----------
u_feedforward : list of numpy.ndarray
Optimal control signals for t = 0, ..., N-1.
V : float
Optimal value function for the given state.
"""
# check that the explicit solution has been found
if self.explicit_solution is None:
raise ValueError('explicit solution not stored.')
# evaluate lookup table
u = self.explicit_solution.u(x)
if u is not None:
u = [u[t*self.S.nu:(t+1)*self.S.nu, :] for t in range(self.N)]
return u, self.explicit_solution.V(x)
def feedback_explicit(self, x):
"""
Finds the critical region where the state x is and returns the optimal feedback for the given state x.
Arguments
----------
x : numpy.ndarray
State of the system.
Returns
----------
u_feedback : numpy.ndarray
Optimal feedback.
"""
# get feedforward and extract first input
u_feedforward = self.feedforward_explicit(x)[0]
if u_feedforward is None:
return None
return u_feedforward[0]
def plot_state_space_partition(self, print_active_set=False, **kwargs):
"""
        Plots the state-space partition defined by the critical regions of the explicit solution.
Arguments
----------
print_active_set : bool
If True it prints the active set of each critical region in its center.
"""
# check that the required plot is 2d and that the solution is available
if self.S.nx != 2:
raise ValueError('can plot only 2-dimensional partitions.')
if self.explicit_solution is None:
raise ValueError('explicit solution not stored.')
# plot every critical region with random colors
for cr in self.explicit_solution.critical_regions:
cr.polyhedron.plot(facecolor=np.random.rand(3), **kwargs)
# if required print active sets
if print_active_set:
plt.text(cr.polyhedron.center[0], cr.polyhedron.center[1], str(cr.active_set))
def plot_optimal_value_function(self, resolution=100, **kwargs):
"""
Plots the level sets of the optimal value function V*(x).
Arguments
----------
resolution : float
Size of the grid for the contour plot.
"""
# check dimension of the state
if self.S.nx != 2:
raise ValueError('can plot only 2-dimensional value functions.')
if self.explicit_solution is None:
raise ValueError('explicit solution not stored.')
# get feasible set
feasible_set = self.mpqp.get_feasible_set()
# create box containing the feasible set
x_max = max([v[0,0] for v in feasible_set.vertices])
x_min = min([v[0,0] for v in feasible_set.vertices])
y_max = max([v[1,0] for v in feasible_set.vertices])
y_min = min([v[1,0] for v in feasible_set.vertices])
# create grid
x = np.linspace(x_min, x_max, resolution)
y = np.linspace(y_min, y_max, resolution)
X, Y = np.meshgrid(x, y)
# evaluate grid
zs = np.array([self.explicit_solution.V(np.array([[x],[y]])) for x,y in zip(np.ravel(X), np.ravel(Y))])
Z = zs.reshape(X.shape)
# plot
feasible_set.plot(**kwargs)
cp = plt.contour(X, Y, Z)
plt.colorbar(cp)
plt.title(r'$V^*(x)$')
class HybridModelPredictiveController(object):
def __init__(self, S, N, Q, R, P, X_N):
"""
        Initializes the controller.
Arguments
----------
S : instance of PieceWiseAffineSystem
PWA system to be controlled.
N : int
Horizon of the optimal control problem.
Q : numpy.ndarray
Quadratic cost for the state.
R : numpy.ndarray
Quadratic cost for the input.
P : numpy.ndarray
Quadratic cost for the terminal state.
X_N : instance of Polyhedron
Terminal set.
"""
# store inputs
self.S = S
self.N = N
self.Q = Q
self.R = R
self.P = P
self.X_N = X_N
# get bigMs
self._alpha, self._beta = self._get_bigM_dynamics()
self._gamma = self._get_bigM_domains()
# condense miqp
self.mpmiqp = self._condense_program()
def _get_bigM_dynamics(self):
"""
Computes all the bigMs for the dynamics of the PWA system.
The PWA system has the dynamics
x(t+1) = A_i x(t) + B_i u(t) + c_i if (x(t),u(t)) in D_i,
where i in {1, ..., s}.
In order to express it in mixed-integer form, for t = 0, ..., N-1, we introduce the auxiliary variables z_i(t), and we set
x(t+1) = sum_{i=1}^s z_i(t).
We now reformulate the dynamics as
z_i(t) >= alpha_ii delta_i(t), (1)
z_i(t) <= beta_ii delta_i(t), (2)
A_i x(t) + B_i u(t) + c_i - z_i(t) >= sum_{j=1, j!=i}^s alpha_ij delta_j(t), (3)
A_i x(t) + B_i u(t) + c_i - z_i(t) <= sum_{j=1, j!=i}^s beta_ij delta_j(t). (4)
Here alpha_ij (<< 0) and beta_ij (>> 0) are both vectors of bigMs and delta_j(t) is a binary variable (equal to 1 if the system is in mode j, zero otherwise).
If the system is in mode k at time t (i.e. delta_k(t) = 1), we have that
z_i(t) = 0, for all i != k,
z_k(t) >= alpha_kk,
z_k(t) <= beta_kk,
A_i x(t) + B_i u(t) + c_i - z_i(t) >= alpha_ik, for all i != k,
A_i x(t) + B_i u(t) + c_i - z_i(t) <= beta_ik, for all i != k,
A_k x(t) + B_k u(t) + c_k = z_k(t),
that sets
x(t+1) = z_k(t) = A_k x(t) + B_k u(t) + c_k
as desired.
It is very important to choose the bigMs as tight as possible, for this reason we set
alpha_ij := min_{(x,u) in D_j} A_i x + B_i u + c_i,
beta_ij := max_{(x,u) in D_j} A_i x + B_i u + c_i.
(Note that the previous are a number of LPs equal to the number of states.)
        The previous ensures that when the system is in mode j != i, the dynamics A_i x + B_i u + c_i is lower bounded by alpha_ij and upper bounded by beta_ij.
Returns
----------
alpha : list of lists of numpy.ndarray
alpha[i][j] is the vector alpha_ij defined above.
beta : list of lists of numpy.ndarray
beta[i][j] is the vector beta_ij defined above.
"""
# initialize list of bigMs
alpha = []
beta = []
# outer loop over the number of affine systems
for i, S_i in enumerate(self.S.affine_systems):
alpha_i = []
beta_i = []
A_i = np.hstack((S_i.A, S_i.B))
# inner loop over the number of affine systems
for j, S_j in enumerate(self.S.affine_systems):
alpha_ij = []
beta_ij = []
D_j = self.S.domains[j]
# solve two LPs for each component of the state vector
for k in range(S_i.nx):
f = A_i[k:k+1,:].T
sol = linear_program(f, D_j.A, D_j.b, D_j.C, D_j.d)
alpha_ij.append(sol['min'] + S_i.c[k,0])
sol = linear_program(-f, D_j.A, D_j.b, D_j.C, D_j.d)
beta_ij.append(- sol['min'] + S_i.c[k,0])
# close inner loop appending bigMs
alpha_i.append(np.vstack(alpha_ij))
            beta_i.append(np.vstack(beta_ij))
#!/usr/bin/python
"""
Convert planeflight output to NetCDF form en masse. A list of variables can be provided to restrict output
This function is written to convert any pf output to NetCDF.
NOTES:
- if GRD output is provided, 3D output can be obtained ( also in NetCDF form )
"""
import glob
import time
import os.path
import sys
import numpy as np
from pandas import DataFrame
import AC_tools as AC
# --- Master settings for main call
# Verbose/debug output? (set debug=True)
DEBUG = True
VERBOSE = False
# Use planeflight files ending with ".out", which have been renumerated
# (Fortran string formatting cuts off output > 5 digits )
renumerated = True # True#False#True # Use
# make 3D gridded output netCDF?
GRD_input_3D = True # False#True
# Are there mulitple sites?
Multiple_sites = False # True # NOTE: this option is not currently working
def main(wd, vars=None, npwd=None, GRD_input_3D=False, renumerated=False,
verbose=False, debug=False):
"""
Driver to process planeflight output from GEOS-Chem
NOTES:
---
- more details on GEOS-Chem's planeflight diagnostic:
(http://acmg.seas.harvard.edu/geos/doc/man/chapter_13.html)
"""
# Get save directory and set output NC name
import os
if not isinstance(npwd, str):
npwd = get_dir('npwd')
out_nc = npwd + 'pf_{}_{}.nc'.format(wd.split('/')[-3],
wd.split('/')[-2], wd.split('/')[-1])
print(("Atempting to append/create file (already exists?:{}): {}".format(
os.path.isfile(out_nc), out_nc)))
# Get pf files
if not os.path.isfile(out_nc):
files = get_pf_files(wd, renumerated=renumerated)
# Make NetCDF as table of all pf files. ( check for file first )
if not os.path.isfile(out_nc):
mk_NetCDF_of_pf_files(files, ncfilename=out_nc, debug=debug)
# If 2D data, make 3D (lon, lat, time) NetCDF file
if GRD_input_3D:
make_3D_NetCDF(ncfilename=out_nc, wd=wd, debug=debug)
# Process multiple sites to "subgrouped" NetCDF file
# NOTE: this is currently not functioning ... TODO
if Multiple_sites:
make_2D_subgroup_NetCDF(ncfilename=out_nc, wd=wd, debug=debug)
def get_pf_files(wd, renumerated=False, debug=False):
"""
Get pf files - edit this give location of planeflight files
"""
# Ensure working dorectory string has leading foreward slash
if wd[-1] != '/':
wd += '/'
# Get files
filenames = '/plane_flight_logs/plane*log*'
if renumerated:
filenames += '.out'
files = glob.glob(wd + filenames)
if debug:
print((wd, filenames, files))
return files
def mk_NetCDF_of_pf_files(files, ncfilename=None, debug=False):
"""
    Make a table-like NetCDF file from any pf output
"""
# --- Setup NetCDF file
ncfile = Dataset(ncfilename, 'w', format='NETCDF4')
# --- Loop files, read in and add to NetCDF
npoint = 1
for n, file in enumerate(files):
# If 1st file setup NetCDF
if n == 0:
# Get Header infomation from first file
vars, sites = get_pf_headers(files[0], debug=debug)
# Extract all points from file
df, vars = AC.pf_csv2pandas(file=file, vars=vars, epoch=True,
r_vars=True)
if debug:
print((df.shape, df.columns))
# set unlimited data points dimension (POINT)
POINT = ncfile.createDimension('POINT', None)
# loop and create variables for each column (exc. last )
if debug:
print(vars)
[ncfile.createVariable(var, var2type(var), ('POINT'))
for var in vars]
# close the file
ncfile.close()
else:
# Extract all points from file
df, vars = AC.pf_csv2pandas(file=file, vars=vars, epoch=True,
r_vars=True)
# Open the file in append mode
ncfile = Dataset(ncfilename, 'a', format='NETCDF4')
if debug:
print((df.index))
# Fill variables for given
dim_len = len(df.index)
for var in vars:
ncfile.variables[var][npoint:npoint+dim_len] = df[var].values
# Tidy up and count
npoint += dim_len
del df
ncfile.close()
def var2type(var, debug=False):
""" Insure that strings are i8 type, add additions to list
for a NetCDF, type must be:
'f4' (32-bit floating point),
'f8' (64-bit floating point),
'i4' (32-bit signed integer),
'i2' (16-bit signed integer),
    'i8' (64-bit signed integer),
'i1' (8-bit signed integer),
'u1' (8-bit unsigned integer),
'u2' (16-bit unsigned integer),
'u4' (32-bit unsigned integer),
'u8' (64-bit unsigned integer), or
'S1' (single-character string)
    ... Also: a 'S' datatype exists for variable-length strings (== numpy object)
"""
if any([i in var for i in ['TYPE', 'Epoch']]):
case = 2
elif any([i in var for i in ['LOC']]):
case = 3
else:
case = 1
cases = {
1: 'f8', # 'f8' (64-bit floating point),
2: 'i8', # 'i8' (64-bit singed integer),
# 3: 'S' # also a 'S' datatype for variable length strings ( numpy object)
3: 'S1'
}
return cases[case]
def get_3D_vars(vars):
""" from a list of pf variables, remove known 2D varables """
known2D = ['Epoch', 'LON', 'LAT', 'YYYYMMDD', 'LOC', 'HHMM', 'POINT']
return [i for i in vars if (i not in known2D)]
def get_site_description_vars(vars):
""" from a list of pf variables, get known site specific varables """
known2D = [
'Epoch', 'LON', 'LAT', 'YYYYMMDD', 'LOC', 'HHMM', 'POINT', 'PRESS'
]
return [i for i in vars if (i not in known2D)]
def make_3D_NetCDF(ncfilename, wd, debug=False):
""" Create NetCDF of 3D arrays for all variables in 2D NetCDF file
    Takes a table-form NetCDF and builds 3D arrays from the lat and lon
in the given file. """
# --- Read existing & setup new NetCDF file
ncfile2D = Dataset(ncfilename, 'r', format='NETCDF4')
# Setup dimensions
vars = ncfile2D.variables
if debug:
print([i for i in vars])
lats, lons, Epoch = [ncfile2D[i] for i in ('LAT', 'LON', 'Epoch')]
if debug:
print([len(i) for i in (lats, lons, Epoch)])
lats, lons, Epoch = [np.array(i) for i in (lats, lons, Epoch)]
lats, lons = [list(sorted(set(i))) for i in (lats, lons)]
# remove fill value. ( 9.969209968386869e+36 ) <= Improve this approach...
[i.pop(-1) for i in (lons, lats)]
# setup 3D NetCDF file
ncfilename = ncfilename.split('.nc')[0]+'_3D.nc'
ncfile = Dataset(ncfilename, 'w', format='NETCDF4')
ncfile.createDimension('lat', len(lats))
ncfile.createDimension('lon', len(lons))
ncfile.createDimension('time', None)
# Define the coordinate variables. They will hold the coordinate
# information, that is, the latitudes and longitudes.
time = ncfile.createVariable('time', 'f4', ('time',))
lat = ncfile.createVariable('lat', 'f4', ('lat',))
lon = ncfile.createVariable('lon', 'f4', ('lon',))
# --- Add meta data
# Assign units attributes to coordinate var data. This attaches a
# text attribute to each of the coordinate variables, containing the
# units.
lat.units = 'degrees_north'
lat.long_name = 'Latitude'
lat.standard_name = 'Latitude'
lat.axis = "Y"
lon.units = 'degrees_east'
lon.long_name = 'Longitude'
lon.standard_name = 'Longitude'
lon.axis = "X"
time.units = 'seconds since 1970-01-01 00:00:00'
time.calendar = "standard"
time.standard_name = 'Time'
time.axis = "T"
    # set global variables
    ncfile.Description = 'planeflight output from {}'.format(wd)
ncfile.Contact = '<NAME> (<EMAIL>)'
# ncfile.History = 'Created {}'.format( time.ctime(time.time()) )
ncfile.Grid = 'lat: {}-{}, lon: {}-{}'.format(lats[0], lats[-1],
lons[0], lons[-1])
# ncfile.Temp_Res = "Hourly"
# ncfile.SpatialCoverage='Global'
# write data to coordinate vars.
lon[:] = lons
lat[:] = lats
# Get unique timesteps
timesteps = sorted(set(Epoch))
# masked 1st value ( headers? )
timesteps.pop(0)
# set time dimension to timestep values
time[:] = timesteps
# select only 3D vars
vars3D = get_3D_vars(vars)
# --- Loop 3D species and create variables (with set dimensions)
for var in vars3D:
ncfile.createVariable(var, var2type(var), ('time', 'lat', 'lon'), )
# close NetCDF
ncfile.close()
# --- Loop through timesteps (epoch) and add to NetCDF
# Loop over timesteps
for t in timesteps:
# open NetCDF in append mode
ncfile = Dataset(ncfilename, 'a', format='NETCDF4')
# get 1st and last indices for time stamp
start, end = [(i.min(), i.max())
for i in np.where(ncfile2D.variables['Epoch'] == t)][0]
# Extract Data for timestep & species
for var in vars3D:
data_ = ncfile2D.variables[var][start:end]
lons_ = ncfile2D.variables['LON'][start:end]
lats_ = ncfile2D.variables['LAT'][start:end]
if debug:
print((t, var, [i.shape for i in (data_, lats_, lons_)]))
# stack data by LAT/LON to 3D array ( using pandas )
df = DataFrame(data_, index=[lats_, lons_]).unstack()
# add data to array
ncfile.variables[var][timesteps.index(t)] = df.values
# remove from memory
del df, data_, lats_, lons_
# Save out final NetCDF file
ncfile.close()
def make_2D_subgroup_NetCDF(ncfilename, wd, debug=False):
""" Create NetCDF of 2D arrays for all variables in 2D NetCDF file by
subgroup ( e.g. multiple sites outputted for at same interval )
Takes a table form NetCDF and build 2D arrays per unit 'POINT'
from sites in the given file.
"""
# --- Read existing & setup new NetCDF file
ncfile2D = Dataset(ncfilename, 'r', format='NETCDF4')
# Setup dimensions
vars = ncfile2D.variables
if debug:
print([i for i in vars])
# Get tim in files
Epoch, LOC = [np.array(ncfile2D[i]) for i in ('Epoch', 'LOC')]
if debug:
print([len(i) for i in [Epoch]])
Epoch, LOC = [list(sorted(set(i))) for i in (Epoch, LOC)]
# remove 1st empty LOC entry
LOC = LOC[1:]
# setup 3D NetCDF file
ncfilename = ncfilename.split('.nc')[0]+'_2D_by_site.nc'
ncfile = Dataset(ncfilename, 'w', format='NETCDF4')
ncfile.createDimension('time', None)
# Define the coordinate variables. They will hold the coordinate
# information, that is, the time
time = ncfile.createVariable('time', 'f4', ('time',))
# --- Add meta data
# Assign units attributes to coordinate var data. This attaches a
# text attribute to each of the coordinate variables, containing the
# units.
# Loop each unique variable and add meta data
time.units = 'seconds since 1970-01-01 00:00:00'
time.calendar = "standard"
time.standard_name = 'Time'
time.axis = "T"
    # set global variables
# ncfile.Description = 'planeflight output from '.format( wd )
ncfile.Contact = '<NAME> (<EMAIL>)'
# ncfile.History = 'Created {}'.format( time.ctime(time.time()) )
# ncfile.Temp_Res = "Hourly"
# ncfile.SpatialCoverage='Global'
# Get unique timesteps
timesteps = sorted(set(Epoch))
# masked 1st value ( headers? )
timesteps.pop(0)
# set time dimension to timestep values
time[:] = timesteps
# --- Add shared variables
# select only 3D vars
# ( variables are referred to as 3D as the function was written for grid
# input. Here the table shape is altered but it remains 2D )
vars3D = get_site_description_vars(vars)
# --- Loop 3D species and create variables (with set dimensions)
for var in vars3D:
# Loop sites and add to beginning of variable
for site in LOC:
ncfile.createVariable(site+'_'+var, var2type(var), ('time'), )
# close NetCDF
ncfile.close()
# --- Loop through timesteps (epoch) and add to NetCDF
# Loop over timesteps in chunks
chunk_size = 7*24 # Note for 365 days, this does make equally sized chunks
# chunk_size = 24 # Note for 365 days, this does make equally sized chunks
ts_chunks = chunks(timesteps, chunk_size)
# check
for ts_chunk in ts_chunks:
# Open NetCDF in append mode
ncfile = Dataset(ncfilename, 'a', format='NETCDF4')
if debug:
print((ts_chunk[0], timesteps[0]))
# Get 1st and last indices for time stamp chunk
start = np.where(ncfile2D.variables['Epoch'] == ts_chunk[0])
        start = np.array(start)
# The MIT License (MIT)
# Copyright (c) 2020-2021 CoML
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# <NAME>, <NAME>, <NAME>
from abc import ABCMeta, abstractmethod
from typing import Optional, Iterable, List
import numpy as np
import pyannote.core.segment
from pyannote.core import Segment
from sortedcontainers import SortedSet
from typing_extensions import Literal
from .continuum import Continuum, Annotator
PivotType = Literal["float_pivot", "int_pivot"]
class AbstractContinuumSampler(metaclass=ABCMeta):
"""
    Tool for generating sampled continua from a reference continuum.
    Used to compute the "expected disorder" when calculating the gamma,
    using particular sampling techniques.
    Must be initialized (with self.init_sampling for instance)
"""
_reference_continuum: Optional[Continuum]
_ground_truth_annotators: Optional[SortedSet]
def __init__(self):
"""Super constructor, sets everything to None since a call to init_sampling to set
parameters is mandatory."""
self._reference_continuum = None
self._ground_truth_annotators = None
def init_sampling(self, reference_continuum: Continuum,
ground_truth_annotators: Optional[Iterable['Annotator']] = None):
"""
Parameters
----------
reference_continuum: Continuum
the continuum that will be shuffled into the samples
ground_truth_annotators: iterable of str, optional
the set of annotators (from the reference) that will be considered for sampling
"""
assert reference_continuum, "Cannot initialize sampling with an empty reference continuum."
self._reference_continuum = reference_continuum
if ground_truth_annotators is None:
self._ground_truth_annotators = self._reference_continuum.annotators
else:
assert self._reference_continuum.annotators.issuperset(ground_truth_annotators), \
"Can't sample from ground truth annotators not in the reference continuum."
self._ground_truth_annotators = SortedSet(ground_truth_annotators)
def _has_been_init(self):
assert self._reference_continuum is not None, \
"Sampler hasnt been initialized. Call 'sampler.init_sampling' before 'sampler.sample_from_continuum'."
@property
@abstractmethod
def sample_from_continuum(self) -> Continuum:
"""
Returns a shuffled continuum based on the reference.
Everything in the generated sample is at least a copy.
Raises
------
ValueError:
            if `init_sampling` or another initialization method hasn't been called before.
"""
pass
class ShuffleContinuumSampler(AbstractContinuumSampler):
"""
    This continuum sampler uses the methods of the gamma software, i.e. those described in
    the gamma paper (https://www.aclweb.org/anthology/J15-3003.pdf, section 5.2)
and implemented in the GammaSoftware.
"""
_pivot_type: PivotType
def __init__(self, pivot_type: PivotType = 'int_pivot'):
"""This constructor allows to set the pivot type to int or float. Defaults to
int to match the java implementation."""
super().__init__()
self._pivot_type = pivot_type
def init_sampling(self, reference_continuum: Continuum,
ground_truth_annotators: Optional[Iterable['Annotator']] = None):
"""
Parameters
----------
reference_continuum: Continuum
the continuum that will be shuffled into the samples
ground_truth_annotators: iterable of str, optional
the set of annotators (from the reference) that will be considered for sampling
"""
super().init_sampling(reference_continuum, ground_truth_annotators)
@staticmethod
def _remove_pivot_segment(pivot: float, segments: List[Segment], dist: float) -> List[Segment]:
"""
Returns a copy of the given list of segments, minus the segment delimited by [pivot - dist, pivot + dist].
"""
new_segments = []
while len(segments) > 0:
segment = segments.pop()
if segment.start >= pivot - dist:
if segment.end <= pivot + dist:
continue
else:
new_segments.append(Segment(pivot + dist, segment.end))
else:
if segment.end > pivot + dist:
new_segments.append(Segment(segment.start, pivot - dist))
new_segments.append(Segment(pivot + dist, segment.end))
else:
new_segments.append(Segment(segment.start, pivot - dist))
return new_segments
def _random_from_segments(self, segments: List[Segment]) -> float:
"""
Returns a random value from the provided list of segments, by randomly choosing
a segment (weighted by its length) and then using uniform distribution in it.
"""
segments = np.array(segments)
weights = np.array(list(segment.end - segment.start for segment in segments))
weights /= np.sum(weights)
try:
segment = np.random.choice(np.array(segments), p=weights)
except ValueError:
return 1
if self._pivot_type == 'int_pivot':
return int(np.random.uniform(segment.start, segment.end))
else:
return np.random.uniform(segment.start, segment.end)
@property
def sample_from_continuum(self) -> Continuum:
self._has_been_init()
assert self._pivot_type in ('float_pivot', 'int_pivot')
continuum = self._reference_continuum
min_dist_between_pivots = continuum.avg_length_unit / 2
bound_inf, bound_sup = continuum.bounds
new_continuum = continuum.copy_flush()
annotators = self._ground_truth_annotators
while not new_continuum: # Simple check to prevent returning an empty continuum.
segments_available = [Segment(bound_inf, bound_sup)]
for idx in range(len(annotators)):
if len(segments_available) != 0:
pivot: float = self._random_from_segments(segments_available)
segments_available = self._remove_pivot_segment(pivot, segments_available, min_dist_between_pivots)
else:
pivot = np.random.uniform(bound_inf, bound_sup)
rnd_annotator = np.random.choice(annotators)
new_annotator = f'Sampled_annotation {idx}'
new_continuum.add_annotator(new_annotator)
for unit in continuum.iter_annotator(rnd_annotator):
if unit.segment.start + pivot > bound_sup:
new_continuum.add(new_annotator,
Segment(unit.segment.start + pivot + bound_inf - bound_sup,
unit.segment.end + pivot + bound_inf - bound_sup),
unit.annotation)
else:
new_continuum.add(new_annotator,
Segment(unit.segment.start + pivot,
unit.segment.end + pivot),
unit.annotation)
return new_continuum
class StatisticalContinuumSampler(AbstractContinuumSampler):
"""
    This sampler creates continua using the average and standard deviation of:
- The number of annotations per annotator
- The gap between two of an annotator's annotations
- The duration of the annotations' segments
The sample is thus created by computing normal distributions using these parameters.
    It also requires the probability of occurrence of each annotation category. You can either initialize sampling with
    custom values or with a reference continuum.
"""
_avg_nb_units_per_annotator: float
_std_nb_units_per_annotator: float
_avg_gap: float
_std_gap: float
_avg_unit_duration: float
_std_unit_duration: float
_categories: np.ndarray
_categories_weight: np.ndarray
def _set_gap_information(self):
# To prevent glitching continua with 1 unit
gaps = [0]
current_annotator = None
last_unit = None
for annotator, unit in self._reference_continuum:
if annotator != current_annotator:
current_annotator = annotator
else:
gaps.append(unit.segment.start - last_unit.segment.end)
last_unit = unit
for annotation_set in self._reference_continuum._annotations.values():
if len(annotation_set) == 0:
continue
if annotation_set[0].segment.start > 0:
gaps.append(annotation_set[0].segment.start)
        self._avg_gap = float(np.mean(gaps))
#!/usr/bin/env python
from __future__ import print_function
import os
from os.path import splitext, join, isfile, isdir, basename
import argparse
import numpy as np
# from scipy import misc, ndimage
import tensorflow.keras.backend as K
from tensorflow.keras.models import model_from_json, load_model
import tensorflow as tf
import layers_builder as layers
from glob import glob
from utils import utils
from tensorflow.python.keras.utils.generic_utils import CustomObjectScope
import cv2
import math
from PIL import Image
# -- Fix for macos, uncomment it
# import matplotlib
# matplotlib.use('TkAgg')
# --
import matplotlib.pyplot as plt
from ade20k_labels import ade20k_label_dict as ade20k_labels
from imageio import imread
# These are the means for the ImageNet pretrained ResNet
DATA_MEAN = np.array([[[123.68, 116.779, 103.939]]]) # RGB order
class PSPNet(object):
"""Pyramid Scene Parsing Network by <NAME> et al 2017"""
def __init__(self, nb_classes, resnet_layers, input_shape, weights):
self.input_shape = input_shape
self.num_classes = nb_classes
json_path = join("weights", "keras", weights + ".json")
h5_path = join("weights", "keras", weights + ".h5")
if 'pspnet' in weights:
if os.path.isfile(json_path) and os.path.isfile(h5_path):
print("Keras model & weights found, loading...")
with CustomObjectScope({'Interp': layers.Interp}):
with open(json_path) as file_handle:
self.model = model_from_json(file_handle.read())
self.model.load_weights(h5_path)
else:
print("No Keras model & weights found, import from npy weights.")
self.model = layers.build_pspnet(nb_classes=nb_classes,
resnet_layers=resnet_layers,
input_shape=self.input_shape)
self.set_npy_weights(weights)
else:
print('Load pre-trained weights')
self.model = load_model(weights)
def predict(self, img, flip_evaluation=False):
"""
        Predict segmentation for an image.
Arguments:
img: must be rowsxcolsx3
"""
if img.shape[0:2] != self.input_shape:
print(
"Input %s not fitting for network size %s, resizing. You may want to try sliding prediction for better results." % (
img.shape[0:2], self.input_shape))
img = np.array(Image.fromarray(img).resize(size=self.input_shape))
# img = misc.imresize(img, self.input_shape)
img = img - DATA_MEAN
img = img[:, :, ::-1] # RGB => BGR
img = img.astype('float32')
probs = self.feed_forward(img, flip_evaluation)
return probs
def predict_sliding(self, full_img, flip_evaluation):
"""
Predict on tiles of exactly the network input shape.
This way nothing gets squeezed.
"""
tile_size = self.input_shape
classes = self.num_classes
overlap = 1 / 3
stride = math.ceil(tile_size[0] * (1 - overlap))
tile_rows = max(int(math.ceil((full_img.shape[0] - tile_size[0]) / stride) + 1), 1) # strided convolution formula
tile_cols = max(int(math.ceil((full_img.shape[1] - tile_size[1]) / stride) + 1), 1)
print("Need %i x %i prediction tiles @ stride %i px" % (tile_cols, tile_rows, stride))
        full_probs = np.zeros((full_img.shape[0], full_img.shape[1], classes))
import logging
import h5py as h5
import numpy as np
import dfcs_vipa
log = logging.getLogger(__name__)
# * Collect arrays and comb teeth intensities
def collect_h5(path, path_dc):
"""Return all data from HDF5 DC measurements.
Subtracts dark current from the signal measurements.
Args:
- path, path_dc: paths to signal and dark measurement files.
Returns:
- 3D array with first dimension corresponds to different measurements
and the last two corresponding to camera array.
"""
with h5.File(path, 'r') as f:
with h5.File(path_dc, 'r') as fdc:
arrs = (f['data'][...].astype(np.int32)-fdc['data'][...].astype(np.int32))
return arrs
def average_h5(path, path_dc):
"""Return averaged data from HDF5 DC measurements.
Subtracts dark current from the signal measurements.
Args:
- path, path_dc: paths to signal and dark measurement files.
Returns:
- 2D array containing averaged and DC-subtracted measurement.
"""
with h5.File(path, 'r') as f:
with h5.File(path_dc, 'r') as fdc:
arr = (f['data'][...].mean(axis=0) -
fdc['data'][...].mean(axis=0))
return arr
def collect_element(path, path_dc, row, col, mask_cols, mask_rows=None):
"""Collect single comb tooth intensities from data arrays.
Args:
- path, path_dc: paths to signal and dark measurement files,
- row, col: position of the comb tooth,
- mask_cols, mask_rows: numpy fancy indexing arrays defining single comb
tooth pattern.
Returns:
- NumPy 1D array containing comb tooth intensities.
"""
# define the hyperslab
col_min, col_max = col + mask_cols.min(), col + mask_cols.max() + 1
if mask_rows is not None:
row_min, row_max = row + mask_rows.min(), row + mask_rows.max() + 1
else:
row_min, row_max = row, row + 1
# collect the hyperslab with the spectral element
with h5.File(path, 'r') as f:
with h5.File(path_dc, 'r') as f_dc:
element_array = (f['data'][..., row_min:row_max,
col_min:col_max].astype(np.int32) -
f_dc['data'][..., row_min:row_max,
col_min:col_max].astype(np.int32))
# retrieve the data for a single spectral element
if mask_rows is not None:
rows = mask_rows - np.min(mask_rows)
else:
rows = np.zeros(mask_cols.shape, dtype=mask_cols.dtype)
cols = mask_cols - np.min(mask_cols)
elements = element_array[..., rows, cols].sum(axis=-1)
return elements
def collect(arr, grid_fancy, mask_cols, mask_rows=None):
"""Collect comb teeth intensities from data array.
Args:
- arr: Numpy 2D array of (averaged) camera frame,
- grid_fancy: tuple of rows and cols array defining positions of the
comb teeth,
- mask_cols, mask_rows: numpy fancy indexing arrays defining single comb
tooth pattern.
"""
rows, cols = grid_fancy
cols = cols[:, np.newaxis] + mask_cols
if mask_rows is not None:
rows = rows[:, np.newaxis] + mask_rows
else:
rows = rows[:, np.newaxis]
elements = arr[..., rows, cols]
return elements.sum(axis=-1)
def collect_multi(ilist, fmt, fmt_dc):
"""Collect averaged camera arrays from multiple files.
Args:
- ilist: sequence which elements are formatted into fmt and fmt_dc to
get paths to measurement files,
- fmt, fmt_dc: format strings.
Returns:
- 3D array with first dimension corresponds to different measurements
and the last two corresponding to camera frame dimensions.
"""
ilength = len(ilist)
multi_arr = np.empty((ilength, dfcs_vipa.ROWS, dfcs_vipa.COLS))
for j, i in enumerate(ilist):
log.info("Averaging '{:s}'".format(fmt.format(i)))
multi_arr[j] = average_h5(fmt.format(i), fmt_dc.format(i))
return multi_arr
def collect_multi_single(ilist, fmt, fmt_dc):
"""Collect camera arrays from multiple files.
Parameters
----------
ilist: sequence
Sequence which elements are formatted into `fmt` and `fmt_dc` to
get paths to measurement files.
fmt : str
Format string for bright frame.
fmt_dc: str
Format string for dark frame.
Returns
-------
ndarray
4D array with first dimension corresponding to different
measurements, second dimension corresponding to different frames
and the last two corresponding to camera frame dimensions.
"""
ilength = len(ilist)
with h5.File(fmt.format(ilist[0]), 'r') as test_file:
frame_num = test_file['data'].shape[0]
multi_arr = np.empty((ilength, frame_num, dfcs_vipa.ROWS, dfcs_vipa.COLS))
for j, i in enumerate(ilist):
log.info("Collecting '{:s}'".format(fmt.format(i)))
multi_arr[j] = collect_h5(fmt.format(i), fmt_dc.format(i))
return multi_arr
def collect_multi_frep_scan(beat_range, scan_range, frep_range, fmt, fmt_dc,
alt=True):
"""Collect averaged camera arrays from different freps and scans.
The result is a nested dictionary with first level corresponding to
different freps, the second level corresponding to different scans
through the cavity modes. Each frep, scan pair corresponds to a 3D
NumPy array (a result of collect_multi) with first dimension numbering
different points on the cavity mode.
Args:
- beat_range: a list of ints numbering measurements within a cavity mode
scan,
- scan_range: a list of ints numbering independent scans of a cavity
mode,
- frep_range: a list of ints numbering different freps (jumps),
- fmt, fmt_dc: twice-nested format strings, the outer pattern
corresponds to the frep number, the inner one to scan-beat number,
- alt: if True then the direction of cavity mode scan is reversed every
second scan.
Returns:
- twice-nested dictionary (frep=>scan=>collect_multi).
"""
frep_arr_avgs = {}
for l in frep_range:
beat_arr_avgs = {}
for k in scan_range:
final_beat = (beat_range + k*40)
if alt:
if k % 2:
final_beat = final_beat[::-1]
beat_arr_avgs[k] = collect_multi(
final_beat,
fmt.format(l),
fmt_dc.format(l))
frep_arr_avgs[l] = beat_arr_avgs
return frep_arr_avgs
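def _demo_frep_scan_traversal(frep_arrs):
    """Sketch (not part of the original module) of walking the nested
    frep => scan => 3D-array structure returned above."""
    for frep, scans in frep_arrs.items():
        for scan, arrs in scans.items():
            log.info("frep %s, scan %s: %d beat points, frame shape %s",
                     frep, scan, arrs.shape[0], arrs.shape[1:])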
def collect_multi_frep_scan_single(beat_range, scan_range, frep_range, fmt, fmt_dc,
alt=True):
"""Collect camera arrays from different freps and scans.
The result is a nested dictionary with first level corresponding to
different freps, the second level corresponding to different scans
through the cavity modes and the third level corresponding to
different beat note. Each frep, scan pair corresponds to a 4D NumPy
array (a result of collect_multi_single) with first dimension
numbering different points on the cavity mode and the second one
numbering different frames.
Parameters
----------
beat_range: list of int
A list of ints numbering measurements within a cavity mode scan,
scan_range: list of int
A list of ints numbering independent scans of a cavity mode,
frep_range: list of int
A list of ints numbering different freps (jumps),
fmt : str
fmt_dc : str
Twice-nested format strings, the outer pattern corresponds to
the frep number, the inner one to scan-beat number,
alt : bool
If True then the direction of cavity mode scan is reversed every
second scan.
Returns
-------
dict
Twice-nested dictionary (frep=>scan=>beats), where `beats` is a
4D array (beat=>frame=>row=>col).
"""
frep_arr_avgs = {}
for l in frep_range:
beat_arr_avgs = {}
for k in scan_range:
final_beat = (beat_range + k*40)
if alt:
if k % 2:
final_beat = final_beat[::-1]
beat_arr_avgs[k] = collect_multi_single(
final_beat,
fmt.format(l),
fmt_dc.format(l))
frep_arr_avgs[l] = beat_arr_avgs
return frep_arr_avgs
def beat_range_list(beat_range, scan_range, alt=True):
for k in scan_range:
final_beat = (beat_range + k*40)
if alt:
if k % 2:
final_beat = final_beat[::-1]
for i in final_beat:
yield i
# * Noise analysis
def frame_stdevs_h5(path):
"""Return standard deviations of each frame.
"""
with h5.File(path, 'r') as f:
stdevs = frame_stdevs(f['data'][...])
return stdevs
def threshold_stdevs(stdevs, scale):
return stdevs.mean() + scale*np.std(stdevs)
def frame_stdevs(arr):
mean = arr.mean(axis=0)
return np.sum(np.sum((arr-mean)**2, axis=-1), axis=-1)
def spectra_stdevs(arr, grid_fancy, mask_cols, mask_rows=None):
spectra = collect(arr, grid_fancy, mask_cols, mask_rows)
spectra_mean = spectra.mean(axis=0)
    return np.sum((spectra-spectra_mean)**2, axis=-1)
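def _demo_flag_noisy_frames(path, scale=3.0):
    """Sketch (not part of the original module): return indices of frames whose
    deviation exceeds the mean plus `scale` standard deviations of the
    per-frame deviations."""
    stdevs = frame_stdevs_h5(path)
    return np.nonzero(stdevs > threshold_stdevs(stdevs, scale))[0]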
"""add_pin adss a Pin to a port, add_pins adds Pins to all ports:
- pins
- outline
Some functions modify a component without changing its name.
Make sure these functions are inside a new Component or called as a decorator
They without modifying the cell name
"""
import json
from functools import partial
from typing import Callable, List, Optional, Tuple, Union
import gdspy
import numpy as np
from numpy import ndarray
from omegaconf import OmegaConf
from phidl.device_layout import Device as Component
from phidl.device_layout import DeviceReference as ComponentReference
from phidl.device_layout import Port
from gdsfactory.snap import snap_to_grid
Layer = Tuple[int, int]
Layers = Tuple[Layer, ...]
LayerSpec = Optional[Union[Layer, str, int]]
LayerSpecs = Tuple[LayerSpec, ...]
nm = 1e-3
def _rotate(v: ndarray, m: ndarray) -> ndarray:
return np.dot(m, v)
def get_pin_triangle_polygon_tip(port: Port) -> Tuple[List[float], Tuple[float, float]]:
"""Returns triangle polygon and tip position"""
p = port
orientation = p.orientation
if orientation is None:
raise ValueError("Port {port.name} needs to have an orientation.")
ca = np.cos(orientation * np.pi / 180)
sa = np.sin(orientation * np.pi / 180)
rot_mat = np.array([[ca, -sa], [sa, ca]])
d = p.width / 2
dbot = np.array([0, -d])
dtop = np.array([0, d])
dtip = np.array([d, 0])
p0 = p.position + _rotate(dbot, rot_mat)
p1 = p.position + _rotate(dtop, rot_mat)
ptip = p.position + _rotate(dtip, rot_mat)
polygon = [p0, p1, ptip]
polygon = np.stack(polygon)
return polygon, ptip
def add_pin_triangle(
component: Component,
port: Port,
layer: LayerSpec = "PORT",
layer_label: LayerSpec = "TEXT",
) -> None:
"""Add triangle pin with a right angle, pointing out of the port
Args:
component: to add pin.
port: Port.
layer: for the pin marker.
layer_label: for the label.
"""
polygon, ptip = get_pin_triangle_polygon_tip(port=port)
component.add_polygon(polygon, layer=layer)
if layer_label:
component.add_label(
text=str(port.name),
position=ptip,
layer=layer_label,
)
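def _demo_add_pin_triangle() -> Component:
    """Usage sketch (not part of the original module).

    Explicit GDS layer tuples are used here instead of the default
    "PORT"/"TEXT" names, which would need a layer map to resolve.
    """
    c = Component("pin_demo")
    c.add_port(name="o1", midpoint=(10.0, 0.0), width=0.5, orientation=0)
    add_pin_triangle(c, c.ports["o1"], layer=(1, 10), layer_label=(66, 0))
    return c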
def add_pin_rectangle_inside(
component: Component,
port: Port,
pin_length: float = 0.1,
layer: LayerSpec = "PORT",
layer_label: LayerSpec = "TEXT",
) -> None:
"""Add square pin towards the inside of the port
Args:
component: to add pins.
port: Port.
pin_length: length of the pin marker for the port.
layer: for the pin marker.
layer_label: for the label.
.. code::
_______________
| |
| |
| |
|| |
|| |
| |
| __ |
|_______________|
"""
p = port
a = p.orientation
ca = np.cos(a * np.pi / 180)
sa = np.sin(a * np.pi / 180)
rot_mat = np.array([[ca, -sa], [sa, ca]])
d = p.width / 2
dbot = np.array([0, -d])
dtop = np.array([0, d])
dbotin = np.array([-pin_length, -d])
dtopin = np.array([-pin_length, +d])
p0 = p.position + _rotate(dbot, rot_mat)
p1 = p.position + _rotate(dtop, rot_mat)
ptopin = p.position + _rotate(dtopin, rot_mat)
pbotin = p.position + _rotate(dbotin, rot_mat)
polygon = [p0, p1, ptopin, pbotin]
component.add_polygon(polygon, layer=layer)
if layer_label:
component.add_label(
text=str(p.name),
position=p.midpoint,
layer=layer_label,
)
def add_pin_rectangle_double(
component: Component,
port: Port,
pin_length: float = 0.1,
layer: LayerSpec = "PORT",
layer_label: LayerSpec = "TEXT",
) -> None:
"""Add two square pins: one inside with label, one outside.
Args:
component: to add pins.
port: Port.
pin_length: length of the pin marker for the port.
layer: for the pin marker.
layer_label: for the label.
.. code::
_______________
| |
| |
| |
||| |
||| |
| |
| __ |
|_______________|
__
"""
p = port
a = p.orientation
ca = np.cos(a * np.pi / 180)
sa = np.sin(a * np.pi / 180)
    rot_mat = np.array([[ca, -sa], [sa, ca]])
"""Test for Conductivity_RTA.py."""
import numpy as np
# first list is k_P, second list is k_C
si_pbesol_kappa_P_RTA = [
108.723,
108.723,
108.723,
0.000,
0.000,
0.000,
] # old value 107.991
si_pbesol_kappa_C = [0.167, 0.167, 0.167, 0.000, 0.000, 0.000]
si_pbesol_kappa_P_RTA_iso = [
97.494,
97.494,
97.494,
0.000,
0.000,
0.000,
] # old value 96.92419
si_pbesol_kappa_C_iso = [0.177, 0.177, 0.177, 0.000, 0.000, 0.000]
si_pbesol_kappa_P_RTA_with_sigmas = [
110.534,
110.534,
110.534,
0,
0,
0,
] # old value 109.6985
si_pbesol_kappa_C_with_sigmas = [0.163, 0.163, 0.163, 0.000, 0.000, 0.000]
si_pbesol_kappa_P_RTA_with_sigmas_iso = [
97.268,
97.268,
97.268,
0,
0,
0,
] # old value 96.03248
si_pbesol_kappa_C_with_sigmas_iso = [0.179, 0.179, 0.179, 0.000, 0.000, 0.000]
si_pbesol_kappa_P_RTA_si_nosym = [
39.325, # old value 38.242347
39.323, # old value 38.700219
39.496, # old value 39.198018
-0.004, # old value 0.3216,
0.020, # old value 0.207731,
0.018, # old value 0.283,
]
si_pbesol_kappa_C_si_nosym = [0.009, 0.009, 0.009, 0.000, 0.000, 0.000]
si_pbesol_kappa_P_RTA_si_nomeshsym = [
39.411,
39.411,
39.411,
0,
0,
0,
] # old value 38.90918
si_pbesol_kappa_C_si_nomeshsym = [0.009, 0.009, 0.009, 0.000, 0.000, 0.000]
nacl_pbe_kappa_P_RTA = [
7.753,
7.753,
7.753,
0.000,
0.000,
0.000,
] # old value 7.72798252
nacl_pbe_kappa_C = [0.081, 0.081, 0.081, 0.000, 0.000, 0.000]
nacl_pbe_kappa_RTA_with_sigma = [7.742, 7.742, 7.742, 0, 0, 0] # old value 7.71913708
nacl_pbe_kappa_C_with_sigma = [0.081, 0.081, 0.081, 0.000, 0.000, 0.000]
aln_lda_kappa_P_RTA = [
203.304,
203.304,
213.003,
0,
0,
0,
] # old value [203.304059, 203.304059, 213.003125, 0, 0, 0]
aln_lda_kappa_C = [0.084, 0.084, 0.037, 0, 0, 0]
aln_lda_kappa_P_RTA_with_sigmas = [
213.820000,
213.820000,
224.800000,
0,
0,
0,
] # old value [213.820000, 213.820000, 224.800121, 0, 0, 0]
aln_lda_kappa_C_with_sigmas = [0.084, 0.084, 0.036, 0, 0, 0]
def test_kappa_RTA_si(si_pbesol):
"""Test RTA by Si."""
kappa_P_RTA, kappa_C = _get_kappa_RTA(si_pbesol, [9, 9, 9])
np.testing.assert_allclose(si_pbesol_kappa_P_RTA, kappa_P_RTA, atol=0.5)
np.testing.assert_allclose(si_pbesol_kappa_C, kappa_C, atol=0.02)
def test_kappa_RTA_si_full_pp(si_pbesol):
"""Test RTA with full-pp by Si."""
kappa_P_RTA, kappa_C = _get_kappa_RTA(si_pbesol, [9, 9, 9], is_full_pp=True)
np.testing.assert_allclose(si_pbesol_kappa_P_RTA, kappa_P_RTA, atol=0.5)
np.testing.assert_allclose(si_pbesol_kappa_C, kappa_C, atol=0.02)
def test_kappa_RTA_si_iso(si_pbesol):
"""Test RTA with isotope scattering by Si."""
kappa_P_RTA, kappa_C = _get_kappa_RTA(si_pbesol, [9, 9, 9], is_isotope=True)
np.testing.assert_allclose(si_pbesol_kappa_P_RTA_iso, kappa_P_RTA, atol=0.5)
np.testing.assert_allclose(si_pbesol_kappa_C_iso, kappa_C, atol=0.02)
def test_kappa_RTA_si_with_sigma(si_pbesol):
"""Test RTA with smearing method by Si."""
si_pbesol.sigmas = [
0.1,
]
kappa_P_RTA, kappa_C = _get_kappa_RTA(si_pbesol, [9, 9, 9])
np.testing.assert_allclose(si_pbesol_kappa_P_RTA_with_sigmas, kappa_P_RTA, atol=0.5)
np.testing.assert_allclose(si_pbesol_kappa_C_with_sigmas, kappa_C, atol=0.02)
si_pbesol.sigmas = None
def test_kappa_RTA_si_with_sigma_full_pp(si_pbesol):
"""Test RTA with smearing method and full-pp by Si."""
si_pbesol.sigmas = [
0.1,
]
kappa_P_RTA, kappa_C = _get_kappa_RTA(si_pbesol, [9, 9, 9], is_full_pp=True)
np.testing.assert_allclose(si_pbesol_kappa_P_RTA_with_sigmas, kappa_P_RTA, atol=0.5)
np.testing.assert_allclose(si_pbesol_kappa_C_with_sigmas, kappa_C, atol=0.02)
si_pbesol.sigmas = None
def test_kappa_RTA_si_with_sigma_iso(si_pbesol):
"""Test RTA with smearing method and isotope scattering by Si."""
si_pbesol.sigmas = [
0.1,
]
kappa_P_RTA, kappa_C = _get_kappa_RTA(si_pbesol, [9, 9, 9], is_isotope=True)
np.testing.assert_allclose(
si_pbesol_kappa_P_RTA_with_sigmas_iso, kappa_P_RTA, atol=0.5
)
np.testing.assert_allclose(si_pbesol_kappa_C_with_sigmas_iso, kappa_C, atol=0.02)
si_pbesol.sigmas = None
def test_kappa_RTA_si_compact_fc(si_pbesol_compact_fc):
"""Test RTA with compact-fc by Si."""
kappa_P_RTA, kappa_C = _get_kappa_RTA(si_pbesol_compact_fc, [9, 9, 9])
np.testing.assert_allclose(si_pbesol_kappa_P_RTA, kappa_P_RTA, atol=0.5)
np.testing.assert_allclose(si_pbesol_kappa_C, kappa_C, atol=0.02)
def test_kappa_RTA_si_nosym(si_pbesol, si_pbesol_nosym):
"""Test RTA without considering symmetry by Si."""
si_pbesol_nosym.fc2 = si_pbesol.fc2
si_pbesol_nosym.fc3 = si_pbesol.fc3
kappa_P_RTA, kappa_C = _get_kappa_RTA(si_pbesol_nosym, [4, 4, 4])
kappa_P_RTA_r = kappa_P_RTA.reshape(-1, 3).sum(axis=1)
kappa_C_r = kappa_C.reshape(-1, 3).sum(axis=1)
kappa_P_ref = np.reshape(si_pbesol_kappa_P_RTA_si_nosym, (-1, 3)).sum(axis=1)
kappa_C_ref = np.reshape(si_pbesol_kappa_C_si_nosym, (-1, 3)).sum(axis=1)
np.testing.assert_allclose(kappa_P_ref / 3, kappa_P_RTA_r / 3, atol=0.8)
np.testing.assert_allclose(kappa_C_ref / 3, kappa_C_r / 3, atol=0.02)
def test_kappa_RTA_si_nomeshsym(si_pbesol, si_pbesol_nomeshsym):
"""Test RTA without considering mesh symmetry by Si."""
si_pbesol_nomeshsym.fc2 = si_pbesol.fc2
si_pbesol_nomeshsym.fc3 = si_pbesol.fc3
kappa_P_RTA, kappa_C = _get_kappa_RTA(si_pbesol_nomeshsym, [4, 4, 4])
np.testing.assert_allclose(
si_pbesol_kappa_P_RTA_si_nomeshsym, kappa_P_RTA, atol=0.5
)
np.testing.assert_allclose(si_pbesol_kappa_C_si_nomeshsym, kappa_C, atol=0.02)
def test_kappa_RTA_si_N_U(si_pbesol):
"""Test RTA with N and U scatterings by Si."""
ph3 = si_pbesol
mesh = [4, 4, 4]
is_N_U = True
ph3.mesh_numbers = mesh
ph3.init_phph_interaction()
ph3.run_thermal_conductivity(
temperatures=[
300,
],
is_N_U=is_N_U,
conductivity_type="wigner",
)
gN, gU = ph3.thermal_conductivity.get_gamma_N_U()
gN_ref = [
0.00000000,
0.00000000,
0.00000000,
0.07402084,
0.07402084,
0.07402084,
0.00078535,
0.00078535,
0.00917995,
0.02178049,
0.04470075,
0.04470075,
0.00173337,
0.00173337,
0.01240191,
0.00198981,
0.03165195,
0.03165195,
0.00224713,
0.00224713,
0.00860026,
0.03083611,
0.03083611,
0.02142118,
0.00277534,
0.00330170,
0.02727451,
0.00356415,
0.01847744,
0.01320643,
0.00155072,
0.00365611,
0.01641919,
0.00650083,
0.02576069,
0.01161589,
0.00411969,
0.00411969,
0.00168211,
0.00168211,
0.01560092,
0.01560092,
0.00620091,
0.00620091,
0.03764912,
0.03764912,
0.02668523,
0.02668523,
]
gU_ref = [
0.00000000,
0.00000000,
0.00000000,
0.00000000,
0.00000000,
0.00000000,
0.00015178,
0.00015178,
0.00076936,
0.00727539,
0.00113112,
0.00113112,
0.00022696,
0.00022696,
0.00072558,
0.00000108,
0.00021968,
0.00021968,
0.00079397,
0.00079397,
0.00111068,
0.00424761,
0.00424761,
0.00697760,
0.00221593,
0.00259510,
0.01996296,
0.00498962,
0.01258375,
0.00513825,
0.00148802,
0.00161955,
0.01589219,
0.00646134,
0.00577275,
0.00849711,
0.00313208,
0.00313208,
0.00036610,
0.00036610,
0.01135335,
0.01135335,
0.00000000,
0.00000000,
0.00000000,
0.00000000,
0.00000000,
0.00000000,
]
# print(np.sum(gN), np.sum(gU))
np.testing.assert_allclose(
np.sum([gN_ref, gU_ref], axis=0), gN.ravel() + gU.ravel(), atol=1e-2
)
np.testing.assert_allclose(gN_ref, gN.ravel(), atol=1e-2)
np.testing.assert_allclose(gU_ref, gU.ravel(), atol=1e-2)
def test_kappa_RTA_nacl(nacl_pbe):
"""Test RTA by NaCl."""
kappa_P_RTA, kappa_C = _get_kappa_RTA(nacl_pbe, [9, 9, 9])
np.testing.assert_allclose(nacl_pbe_kappa_P_RTA, kappa_P_RTA, atol=0.5)
np.testing.assert_allclose(nacl_pbe_kappa_C, kappa_C, atol=0.02)
def test_kappa_RTA_nacl_with_sigma(nacl_pbe):
"""Test RTA with smearing method by NaCl."""
nacl_pbe.sigmas = [
0.1,
]
nacl_pbe.sigma_cutoff = 3
kappa_P_RTA, kappa_C = _get_kappa_RTA(nacl_pbe, [9, 9, 9])
np.testing.assert_allclose(nacl_pbe_kappa_RTA_with_sigma, kappa_P_RTA, atol=0.5)
    np.testing.assert_allclose(nacl_pbe_kappa_C_with_sigma, kappa_C, atol=0.02)
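# The tests above call a `_get_kappa_RTA` helper that lies outside this excerpt.
# A minimal sketch of what it presumably does, based on the phono3py calls used
# in test_kappa_RTA_si_N_U above; the kappa_P_RTA / kappa_C attribute names are
# an assumption, not a confirmed API.
def _get_kappa_RTA_sketch(ph3, mesh, is_isotope=False, is_full_pp=False):
    ph3.mesh_numbers = mesh
    ph3.init_phph_interaction()
    ph3.run_thermal_conductivity(
        temperatures=[300],
        is_isotope=is_isotope,
        is_full_pp=is_full_pp,
        conductivity_type="wigner",
    )
    cond = ph3.thermal_conductivity
    return cond.kappa_P_RTA.ravel(), cond.kappa_C.ravel()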
from __future__ import division
import unittest
from inferelator import utils
from inferelator.postprocessing import GOLD_STANDARD_COLUMN, CONFIDENCE_COLUMN, TARGET_COLUMN, REGULATOR_COLUMN
from inferelator.postprocessing import results_processor
from inferelator.postprocessing import results_processor_mtl
from inferelator.postprocessing import MetricHandler, RankSummingMetric
import pandas as pd
import pandas.testing as pdt
import numpy as np
import os
import tempfile
import shutil
import logging
logging.getLogger('matplotlib').setLevel(logging.ERROR)
class TestResults(unittest.TestCase):
def setUp(self):
# Data was taken from a subset of row 42 of Bacillus subtilis run results
self.beta1 = pd.DataFrame(np.array([[-0.2841755, 0, 0.2280624, -0.3852462, 0.2545609]]), ['gene1'],
['tf1', 'tf2', 'tf3', 'tf4', 'tf5'])
self.rescaled_beta1 = pd.DataFrame(np.array([[0.09488207, 0, 0.07380172, 0.15597205, 0.07595131]]), ['gene1'],
['tf1', 'tf2', 'tf3', 'tf4', 'tf5'])
self.beta2 = pd.DataFrame(np.array([[0, 0.2612011, 0.1922999, 0.00000000, 0.19183277]]), ['gene1'],
['tf1', 'tf2', 'tf3', 'tf4', 'tf5'])
self.rescaled_beta2 = pd.DataFrame(np.array([[0, 0.09109101, 0.05830292, 0.00000000, 0.3675702]]), ['gene1'],
['tf1', 'tf2', 'tf3', 'tf4', 'tf5'])
# Toy data
self.beta = pd.DataFrame(np.array([[0, 1], [0.5, 0.05]]), ['gene1', 'gene2'], ['tf1', 'tf2'])
self.beta_resc = pd.DataFrame(np.array([[0, 1.1], [1, 0.05]]), ['gene1', 'gene2'], ['tf1', 'tf2'])
self.prior = pd.DataFrame([[0, 1], [1, 0]], ['gene1', 'gene2'], ['tf1', 'tf2'])
self.gold_standard = pd.DataFrame([[0, 1], [1, 0]], ['gene1', 'gene2'], ['tf1', 'tf2'])
self.gold_standard_unaligned = pd.DataFrame([[0, 1], [0, 0]], ['gene1', 'gene3'], ['tf1', 'tf2'])
@staticmethod
def make_PR_data(gs, confidences):
data = utils.melt_and_reindex_dataframe(confidences, value_name=CONFIDENCE_COLUMN).reset_index()
data = data.join(utils.melt_and_reindex_dataframe(gs, value_name=GOLD_STANDARD_COLUMN),
on=[TARGET_COLUMN, REGULATOR_COLUMN], how='outer')
return data
class TestResultsProcessor(TestResults):
def test_full_stack(self):
rp = results_processor.ResultsProcessor([self.beta], [self.beta_resc])
result = rp.summarize_network(None, self.gold_standard, self.prior)
self.assertEqual(result.score, 1)
def test_combining_confidences_two_betas_negative_values_assert_nonzero_betas(self):
_, _, betas_non_zero = results_processor.ResultsProcessor.threshold_and_summarize([self.beta1, self.beta2], 0.5)
np.testing.assert_equal(betas_non_zero, np.array([[1, 1, 2, 1, 2]]))
def test_combining_confidences_two_betas_negative_values_assert_sign_betas(self):
_, betas_sign, _ = results_processor.ResultsProcessor.threshold_and_summarize([self.beta1, self.beta2], 0.5)
np.testing.assert_equal(betas_sign, np.array([[-1, 1, 2, -1, 2]]))
def test_threshold_and_summarize_one_beta(self):
beta1 = pd.DataFrame(np.array([[1, 0], [0.5, 0]]), ['gene1', 'gene2'], ['tf1', 'tf2'])
thresholded_mat, _, _ = results_processor.ResultsProcessor.threshold_and_summarize([beta1], 0.5)
np.testing.assert_equal(thresholded_mat.values, np.array([[1, 0], [1, 0]]))
def test_threshold_and_summarize_two_betas(self):
beta1 = pd.DataFrame(np.array([[1, 0], [0.5, 0]]), ['gene1', 'gene2'], ['tf1', 'tf2'])
beta2 = pd.DataFrame(np.array([[0, 0], [0.5, 1]]), ['gene1', 'gene2'], ['tf1', 'tf2'])
thresholded_mat, _, _ = results_processor.ResultsProcessor.threshold_and_summarize([beta1, beta2], 0.5)
np.testing.assert_equal(thresholded_mat.values,
np.array([[1, 0], [1, 1]]))
def test_threshold_and_summarize_three_betas(self):
beta1 = pd.DataFrame(np.array([[1, 0], [0.5, 0]]), ['gene1', 'gene2'], ['tf1', 'tf2'])
beta2 = pd.DataFrame(np.array([[0, 0], [0.5, 0]]), ['gene1', 'gene2'], ['tf1', 'tf2'])
beta3 = pd.DataFrame(np.array([[0.5, 0.2], [0.5, 0.1]]), ['gene1', 'gene2'], ['tf1', 'tf2'])
thresholded_mat, _, _ = results_processor.ResultsProcessor.threshold_and_summarize([beta1, beta2, beta3], 0.5)
np.testing.assert_equal(thresholded_mat.values,
np.array([[1, 0], [1, 0]]))
def test_threshold_and_summarize_three_betas_negative_values(self):
beta1 = pd.DataFrame(np.array([[1, 0], [-0.5, 0]]), ['gene1', 'gene2'], ['tf1', 'tf2'])
beta2 = pd.DataFrame(np.array([[0, 0], [-0.5, 1]]), ['gene1', 'gene2'], ['tf1', 'tf2'])
beta3 = pd.DataFrame(np.array([[-0.5, 0.2], [-0.5, 0.1]]), ['gene1', 'gene2'], ['tf1', 'tf2'])
thresholded_mat, _, _ = results_processor.ResultsProcessor.threshold_and_summarize([beta1, beta2, beta3], 0.5)
np.testing.assert_equal(thresholded_mat.values,
np.array([[1, 0], [1, 1]]))
def test_mean_and_median(self):
beta1 = pd.DataFrame(np.array([[1, 1], [1, 1]]), ['gene1', 'gene2'], ['tf1', 'tf2'])
beta2 = pd.DataFrame(np.array([[2, 2], [2, 2]]), ['gene1', 'gene2'], ['tf1', 'tf2'])
mean, median = results_processor.ResultsProcessor.mean_and_median([beta1, beta2])
np.testing.assert_equal(mean, np.array([[1.5, 1.5], [1.5, 1.5]]))
np.testing.assert_equal(median, np.array([[1.5, 1.5], [1.5, 1.5]]))
class TestNetworkCreator(TestResults):
def setUp(self):
super(TestNetworkCreator, self).setUp()
self.metric = MetricHandler.get_metric("aupr")
self.pr_calc = self.metric([self.rescaled_beta1, self.rescaled_beta2], self.gold_standard,
"keep_all_gold_standard")
self.beta_sign, self.beta_nonzero = results_processor.ResultsProcessor.summarize([self.beta1, self.beta2])
self.beta_threshold = results_processor.ResultsProcessor.passes_threshold(self.beta_nonzero, 2, 0.5)
def test_process_network(self):
net = results_processor.ResultsProcessor.process_network(self.pr_calc, self.prior,
beta_threshold=self.beta_threshold)
self.assertListEqual(net['regulator'].tolist(), ['tf5', 'tf4', 'tf1'])
self.assertListEqual(net['target'].tolist(), ['gene1'] * 3)
self.assertListEqual(net['combined_confidences'].tolist(), [0.6, 0.3, 0.1])
def test_network_summary(self):
temp_dir = tempfile.mkdtemp()
net = results_processor.ResultsProcessor.process_network(self.pr_calc, self.prior,
beta_threshold=self.beta_threshold)
result = results_processor.InferelatorResults(net, self.beta_threshold, self.pr_calc.all_confidences,
self.pr_calc)
result.write_result_files(temp_dir)
processed_data = pd.read_csv(os.path.join(temp_dir, "network.tsv"), sep="\t", index_col=None, header=0)
self.assertEqual(processed_data.shape[0], 3)
self.assertListEqual(processed_data['regulator'].tolist(), ['tf5', 'tf4', 'tf1'])
self.assertListEqual(processed_data['target'].tolist(), ['gene1'] * 3)
self.assertListEqual(processed_data['combined_confidences'].tolist(), [0.6, 0.3, 0.1])
shutil.rmtree(temp_dir)
class TestRankSummary(TestResults):
def setUp(self):
super(TestRankSummary, self).setUp()
self.metric = RankSummingMetric
def test_making_network_dataframe(self):
calc = self.metric([self.beta_resc, self.beta_resc], self.gold_standard_unaligned)
pdt.assert_frame_equal(calc.gold_standard, self.gold_standard_unaligned)
self.assertEqual(calc.confidence_data.shape[0], 6)
self.assertEqual(pd.isnull(calc.confidence_data[CONFIDENCE_COLUMN]).sum(), 2)
self.assertEqual(pd.isnull(calc.confidence_data[GOLD_STANDARD_COLUMN]).sum(), 2)
def test_combining_confidences_one_beta(self):
# rescaled betas are only in the
beta = pd.DataFrame(np.array([[0.5, 0], [0.5, 1]]), ['gene1', 'gene2'], ['tf1', 'tf2'])
confidences = self.metric.compute_combined_confidences([beta])
np.testing.assert_equal(confidences.values,
np.array([[0.5, 0.0], [0.5, 1.0]]))
def test_combining_confidences_one_beta_invariant_to_rescale_division(self):
# rescaled betas are only in the
        beta = pd.DataFrame(np.array([[1, 0], [1, 2]]), ['gene1', 'gene2'], ['tf1', 'tf2'])
# -*- coding: utf-8 -*-
"""
fbe.py
=========================================================================
FBE module provides several utilities and signal parametrization methods.
"""
__author__ = '<NAME>'
import spectrum
import numpy as np
from typing import Tuple
from scipy.io import wavfile
import scipy.signal
import os
import sys
import soundfile as sf
class sin2cos2:
"""
Class for computing signal windowing function with sin(x)^2 and cos(x)^2 tails.
:param frame: the frame length in samples
:type frame: int
:param overlap: the size of the overlaping part of the window (the length of the tails on both sides)
:type overlap: int
:return: nothing
"""
def __init__(self, frame : int = 512, overlap : int = 50):
self._win = np.zeros((frame,))
self._frame = frame
self._overlap = overlap
self._compute_window()
def _compute_window(self):
for i in range(self._overlap):
self._win[i] = np.sin(2*np.pi/(4*(self._overlap+2))*(i+1))**2
for i in range(self._overlap,self._frame-self._overlap):
self._win[i] = 1
for i in range(self._frame-self._overlap,self._frame):
self._win[i] = np.cos(2*np.pi/(4*(self._overlap+2))*(i-self._frame+self._overlap+1))**2
def window(self):
"""
Method returning the vector of window's values.
:return: the window
:rtype: numpy array of length frame
"""
return self._win
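def _demo_sin2cos2_window():
    """Usage sketch (not part of the original module): a 512-sample window with
    50-sample sin^2/cos^2 tails, e.g. for overlap-add framing."""
    win = sin2cos2(frame=512, overlap=50).window()
    assert win.shape == (512,)
    return win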
class fbe:
"""
Versatile class computing various speech signal representations, mostly based on AR modelling and Mel Frequency
Filterbanks.
    :param frame_zero_adding: required length of the sequence after zero padding, defaults to None, which indicates no zero padding
:type frame_zero_adding: int
:param frame: frame length in samples
:type frame: int
:param sr: sampling frequency in Hz
:type sr: float
:param preem_alfa: the preemphasis coefficient
:type preem_alfa: float
:param freq_range: frequency range in which the mel frequency filterbanks should be computed
:type freq_range: np.ndarray two elemements vector of floats
:param filts_num: number of mel frequency triangular filters in the filterbank
:type filts_num: int
:param window: the windowing function
:type window: np.ndarray, numpy vector of floats, defaults to None, which causes using of rectangular window
:param ar_order: the AR model order
:type ar_order: int
:param cepstral_lifter: the cepstral lifter in MFCC computation
:type cepstral_lifter: int
:param num_ceps: number of cepstra
:type num_ceps: int
:returns: nothing
.. note:: PSD is abbreviation for power spectral density in the whole documentation. AR is abbreviation for
autoregressive in the whole documentation.
"""
def __init__(self, frame_zero_adding=None, frame=512, sr=16000, preem_alfa=0.95, overlap=0,
freq_range=[20., 8000.], filts_num=23, num_gfs=70, spl_of_max_amplitude=88,
window=None, ar_order=16, cepstral_lifter=22, num_ceps=13):
if overlap==0 or overlap > frame/2:
overlap = frame/2
if window is None:
window = np.ones((frame,))
if frame != len(window):
print("ERROR in fbe, frame and window lengths do not match, program exits ...")
sys.exit(1)
self.sr = sr # sampling frequency in Hz
self.frame = frame # number of samples in the frame
self.num_ceps = num_ceps
        if frame_zero_adding is not None:
            self._nfft = frame_zero_adding  # fft length, sets the self._nfft attribute
else:
self._nfft = frame
self.preem_alfa = preem_alfa # preemphasis coefficient
self.freq_range = freq_range # frequency range in Hz
self.filts_num = filts_num # number of triangular filterbank channels
self.K = int(self._nfft / 2.) + 1 # length of the unique part of the FFT
self.f_min = 0
self.f_max = float(sr) / 2.
self.f_low = self.freq_range[0]
self.f_high = self.freq_range[1]
# matrices
        self._tfb = self._tfb()  # compute the mel-frequency triangular filterbank, sets the _tfb attribute
self._pinv_tfb = self._pinv_tfb()
self._wgh_mat = self._wgh_mat()
self._inv_wgh_mat = self._inv_wgh_mat()
# window
self._window = window
self._ar_order = ar_order
# compute cepstral lifter
L = cepstral_lifter
N = num_ceps
self.cepstral_lifter = 1+0.5*L*np.sin(np.pi*np.asarray(range(N))/float(L))
# dct matrix
self.dctmat = np.zeros((self.num_ceps,self.filts_num))
for i in range(self.num_ceps):
for j in range(self.filts_num):
                self.dctmat[i,j] = np.sqrt(2./self.filts_num)*np.cos(np.pi*i*(j+0.5)/self.filts_num)
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Deep Recurrent Reinforcement Learning: 1 LSTM layer and 4 Dense layers, tanh activation function, 12 episodes, 50 iterations
drnnLSTMtanhMakespan0=[799, 798, 799, 799, 805, 806, 799, 805, 805, 800, 798, 798]
drnnLSTMtanhMakespan1=[800, 798, 796, 800, 796, 794, 795, 798, 800, 798, 805, 798]
drnnLSTMtanhMakespan2=[796, 800, 798, 804, 800, 798, 798, 798, 800, 800, 802, 797]
drnnLSTMtanhMakespan3=[805, 800, 800, 803, 794, 802, 800, 798, 799, 804, 799, 806]
drnnLSTMtanhMakespan4=[796, 798, 795, 798, 796, 799, 800, 796, 796, 798, 806, 800]
drnnLSTMtanhMakespan5=[798, 798, 799, 800, 800, 808, 798, 798, 801, 796, 799, 798]
drnnLSTMtanhMakespan6=[800, 796, 805, 798, 798, 796, 799, 800, 803, 800, 798, 800]
drnnLSTMtanhMakespan7=[799, 805, 802, 805, 800, 799, 800, 799, 805, 800, 794, 796]
drnnLSTMtanhMakespan8=[799, 798, 800, 798, 798, 800, 800, 800, 804, 799, 800, 804]
drnnLSTMtanhMakespan9=[795, 800, 795, 796, 798, 796, 797, 800, 797, 798, 796, 795]
drnnLSTMtanhMakespan10=[804, 799, 805, 798, 798, 798, 805, 800, 796, 804, 796, 799]
drnnLSTMtanhMakespan11=[795, 803, 805, 798, 795, 801, 798, 798, 804, 803, 799, 804]
drnnLSTMtanhMakespan12=[798, 798, 799, 800, 798, 798, 799, 799, 801, 796, 799, 798]
drnnLSTMtanhMakespan13=[798, 798, 799, 797, 796, 796, 800, 797, 805, 800, 800, 794]
drnnLSTMtanhMakespan14=[800, 798, 798, 796, 800, 800, 798, 798, 802, 798, 802, 798]
drnnLSTMtanhMakespan15=[796, 796, 800, 801, 800, 800, 796, 794, 796, 800, 796, 798]
drnnLSTMtanhMakespan16=[798, 798, 795, 797, 795, 799, 800, 796, 795, 796, 800, 800]
drnnLSTMtanhMakespan17=[794, 795, 800, 798, 795, 796, 798, 796, 795, 794, 798, 796]
drnnLSTMtanhMakespan18=[797, 795, 794, 794, 800, 796, 796, 795, 798, 795, 798, 794]
drnnLSTMtanhMakespan19=[797, 795, 795, 796, 798, 799, 795, 799, 795, 794, 795, 795]
drnnLSTMtanhMakespan20=[796, 794, 798, 797, 798, 799, 795, 795, 797, 795, 795, 792]
drnnLSTMtanhMakespan21=[797, 795, 797, 793, 794, 794, 800, 794, 798, 795, 797, 795]
drnnLSTMtanhMakespan22=[794, 800, 798, 795, 795, 796, 796, 799, 795, 794, 795, 795]
drnnLSTMtanhMakespan23=[795, 795, 794, 795, 794, 794, 797, 799, 796, 794, 794, 795]
drnnLSTMtanhMakespan24=[798, 795, 795, 795, 792, 794, 795, 794, 794, 795, 795, 795]
drnnLSTMtanhMakespan25=[794, 792, 794, 795, 795, 794, 794, 794, 794, 795, 794, 793]
drnnLSTMtanhMakespan26=[794, 794, 795, 796, 798, 795, 794, 794, 794, 794, 795, 794]
drnnLSTMtanhMakespan27=[795, 794, 795, 795, 795, 794, 794, 794, 794, 794, 795, 795]
drnnLSTMtanhMakespan28=[795, 794, 794, 795, 794, 795, 795, 795, 795, 794, 795, 794]
drnnLSTMtanhMakespan29=[792, 794, 795, 794, 794, 795, 794, 793, 795, 794, 795, 792]
drnnLSTMtanhMakespan30=[795, 794, 795, 795, 794, 794, 794, 795, 794, 794, 794, 794]
drnnLSTMtanhMakespan31=[794, 794, 795, 794, 795, 793, 795, 795, 795, 792, 794, 794]
drnnLSTMtanhMakespan32=[795, 795, 794, 793, 795, 795, 795, 795, 794, 794, 795, 794]
drnnLSTMtanhMakespan33=[793, 794, 795, 793, 792, 795, 794, 794, 794, 794, 794, 795]
drnnLSTMtanhMakespan34=[794, 795, 795, 794, 794, 794, 794, 793, 794, 794, 794, 794]
drnnLSTMtanhMakespan35=[794, 794, 797, 793, 792, 794, 793, 794, 795, 794, 795, 792]
drnnLSTMtanhMakespan36=[794, 794, 793, 794, 795, 797, 795, 795, 794, 795, 793, 794]
drnnLSTMtanhMakespan37=[795, 793, 795, 794, 795, 798, 795, 794, 795, 793, 795, 794]
drnnLSTMtanhMakespan38=[794, 795, 793, 795, 794, 794, 794, 794, 794, 794, 797, 795]
drnnLSTMtanhMakespan39=[794, 794, 795, 794, 795, 795, 794, 795, 794, 795, 798, 797]
drnnLSTMtanhMakespan40=[795, 795, 794, 795, 794, 795, 795, 794, 794, 794, 795, 795]
drnnLSTMtanhMakespan41=[794, 795, 792, 794, 794, 798, 795, 794, 794, 794, 793, 795]
drnnLSTMtanhMakespan42=[793, 795, 794, 793, 794, 794, 792, 794, 795, 794, 794, 793]
drnnLSTMtanhMakespan43=[793, 792, 793, 794, 794, 795, 792, 794, 795, 794, 795, 794]
drnnLSTMtanhMakespan44=[793, 794, 795, 795, 794, 794, 795, 798, 794, 792, 795, 794]
drnnLSTMtanhMakespan45=[795, 794, 794, 794, 794, 792, 794, 795, 794, 796, 795, 794]
drnnLSTMtanhMakespan46=[794, 793, 793, 795, 795, 794, 794, 794, 794, 796, 794, 794]
drnnLSTMtanhMakespan47=[794, 794, 795, 794, 794, 795, 792, 795, 794, 795, 795, 794]
drnnLSTMtanhMakespan48=[794, 795, 794, 794, 794, 792, 794, 795, 796, 794, 794, 795]
drnnLSTMtanhMakespan49=[794, 794, 794, 794, 794, 794, 792, 794, 793, 794, 795, 794]
drnnLSTMtanhRewards0=[-0.1759911894273128, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.177078750549934, -0.17725973169122497, -0.1759911894273128, -0.177078750549934, -0.177078750549934, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765]
drnnLSTMtanhRewards1=[-0.17617264919621228, -0.17580964970257765, -0.17544633017412387, -0.17617264919621228, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17580964970257765, -0.17580964970257765]
drnnLSTMtanhRewards2=[-0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.1768976897689769, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17653532907770195, -0.17562802996914942]
drnnLSTMtanhRewards3=[-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17671654929577466, -0.17508269018743108, -0.17653532907770195, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.1768976897689769, -0.1759911894273128, -0.17725973169122497]
drnnLSTMtanhRewards4=[-0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17725973169122497, -0.17617264919621228]
drnnLSTMtanhRewards5=[-0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.1776214552648934, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765]
drnnLSTMtanhRewards6=[-0.17617264919621228, -0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.1759911894273128, -0.17617264919621228, -0.17671654929577466, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228]
drnnLSTMtanhRewards7=[-0.1759911894273128, -0.177078750549934, -0.17653532907770195, -0.177078750549934, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.1759911894273128, -0.177078750549934, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387]
drnnLSTMtanhRewards8=[-0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.1759911894273128, -0.17617264919621228, -0.1768976897689769]
drnnLSTMtanhRewards9=[-0.17526455026455026, -0.17617264919621228, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17562802996914942, -0.17617264919621228, -0.17562802996914942, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026]
drnnLSTMtanhRewards10=[-0.1768976897689769, -0.1759911894273128, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17617264919621228, -0.17544633017412387, -0.1768976897689769, -0.17544633017412387, -0.1759911894273128]
drnnLSTMtanhRewards11=[-0.17526455026455026, -0.17671654929577466, -0.177078750549934, -0.17580964970257765, -0.17526455026455026, -0.1763540290620872, -0.17580964970257765, -0.17580964970257765, -0.1768976897689769, -0.17671654929577466, -0.1759911894273128, -0.1768976897689769]
drnnLSTMtanhRewards12=[-0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.1763540290620872, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765]
drnnLSTMtanhRewards13=[-0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17562802996914942, -0.17544633017412387, -0.17544633017412387, -0.17617264919621228, -0.17562802996914942, -0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108]
drnnLSTMtanhRewards14=[-0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17653532907770195, -0.17580964970257765, -0.17653532907770195, -0.17580964970257765]
drnnLSTMtanhRewards15=[-0.17544633017412387, -0.17544633017412387, -0.17617264919621228, -0.1763540290620872, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17508269018743108, -0.17544633017412387, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765]
drnnLSTMtanhRewards16=[-0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.17617264919621228, -0.17617264919621228]
drnnLSTMtanhRewards17=[-0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387]
drnnLSTMtanhRewards18=[-0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108]
drnnLSTMtanhRewards19=[-0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards20=[-0.17544633017412387, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17471872931833224]
drnnLSTMtanhRewards21=[-0.17562802996914942, -0.17526455026455026, -0.17562802996914942, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026]
drnnLSTMtanhRewards22=[-0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17544633017412387, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards23=[-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.1759911894273128, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drnnLSTMtanhRewards24=[-0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards25=[-0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221]
drnnLSTMtanhRewards26=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards27=[-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards28=[-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards29=[-0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224]
drnnLSTMtanhRewards30=[-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnLSTMtanhRewards31=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnLSTMtanhRewards32=[-0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards33=[-0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drnnLSTMtanhRewards34=[-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnLSTMtanhRewards35=[-0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17471872931833224, -0.1749007498897221, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224]
drnnLSTMtanhRewards36=[-0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108]
drnnLSTMtanhRewards37=[-0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards38=[-0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026]
drnnLSTMtanhRewards39=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942]
drnnLSTMtanhRewards40=[-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards41=[-0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026]
drnnLSTMtanhRewards42=[-0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221]
drnnLSTMtanhRewards43=[-0.1749007498897221, -0.17471872931833224, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards44=[-0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards45=[-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards46=[-0.17508269018743108, -0.1749007498897221, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108]
drnnLSTMtanhRewards47=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards48=[-0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drnnLSTMtanhRewards49=[-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
# Deep Recurrent Reinforcement Learning: 1 LSTM layer and 4 Dense layers, relu activation function, 12 episodes, 50 iterations
drnnLSTMreluMakespan0=[805, 800, 800, 800, 794, 800, 798, 809, 795, 800, 798, 798]
drnnLSTMreluMakespan1=[798, 798, 796, 799, 800, 796, 796, 798, 798, 794, 798, 800]
drnnLSTMreluMakespan2=[805, 805, 798, 799, 806, 799, 806, 799, 800, 798, 805, 795]
drnnLSTMreluMakespan3=[800, 800, 800, 796, 800, 800, 799, 806, 808, 798, 797, 798]
drnnLSTMreluMakespan4=[805, 805, 795, 796, 799, 804, 798, 794, 798, 794, 796, 810]
drnnLSTMreluMakespan5=[798, 798, 798, 795, 800, 798, 796, 802, 800, 800, 805, 801]
drnnLSTMreluMakespan6=[800, 798, 798, 795, 800, 796, 800, 798, 799, 796, 805, 800]
drnnLSTMreluMakespan7=[800, 800, 800, 799, 798, 798, 800, 805, 800, 799, 800, 801]
drnnLSTMreluMakespan8=[799, 800, 800, 799, 795, 795, 805, 795, 798, 800, 798, 800]
drnnLSTMreluMakespan9=[800, 796, 805, 798, 798, 795, 805, 800, 799, 795, 800, 805]
drnnLSTMreluMakespan10=[805, 798, 805, 800, 801, 805, 799, 805, 798, 800, 800, 798]
drnnLSTMreluMakespan11=[798, 803, 800, 797, 795, 796, 794, 799, 800, 800, 800, 796]
drnnLSTMreluMakespan12=[799, 798, 799, 795, 798, 795, 798, 798, 798, 795, 798, 798]
drnnLSTMreluMakespan13=[798, 798, 799, 796, 798, 796, 800, 799, 796, 794, 796, 795]
drnnLSTMreluMakespan14=[796, 798, 806, 799, 804, 798, 805, 798, 800, 805, 794, 800]
drnnLSTMreluMakespan15=[806, 795, 800, 796, 798, 796, 810, 798, 799, 798, 800, 800]
drnnLSTMreluMakespan16=[799, 796, 798, 798, 798, 800, 798, 810, 796, 805, 800, 795]
drnnLSTMreluMakespan17=[798, 798, 798, 794, 798, 805, 801, 798, 800, 799, 798, 798]
drnnLSTMreluMakespan18=[795, 800, 794, 798, 797, 798, 794, 800, 797, 796, 794, 794]
drnnLSTMreluMakespan19=[798, 802, 794, 798, 799, 795, 797, 795, 800, 796, 797, 796]
drnnLSTMreluMakespan20=[794, 797, 795, 794, 799, 795, 795, 795, 800, 797, 794, 798]
drnnLSTMreluMakespan21=[799, 798, 796, 795, 794, 798, 795, 795, 798, 798, 795, 794]
drnnLSTMreluMakespan22=[794, 794, 795, 797, 795, 795, 795, 792, 794, 795, 794, 794]
drnnLSTMreluMakespan23=[794, 794, 794, 794, 795, 796, 793, 794, 795, 794, 797, 795]
drnnLSTMreluMakespan24=[794, 792, 792, 794, 796, 792, 794, 795, 794, 792, 796, 795]
drnnLSTMreluMakespan25=[794, 795, 795, 794, 794, 792, 795, 792, 795, 794, 794, 794]
drnnLSTMreluMakespan26=[795, 794, 794, 795, 794, 794, 793, 794, 797, 795, 794, 795]
drnnLSTMreluMakespan27=[794, 794, 795, 796, 795, 797, 794, 794, 795, 801, 794, 795]
drnnLSTMreluMakespan28=[795, 795, 795, 795, 794, 792, 794, 797, 794, 795, 795, 795]
drnnLSTMreluMakespan29=[794, 792, 798, 794, 797, 795, 793, 795, 795, 794, 795, 795]
drnnLSTMreluMakespan30=[795, 794, 798, 794, 794, 795, 792, 796, 794, 796, 794, 794]
drnnLSTMreluMakespan31=[794, 795, 795, 794, 795, 794, 795, 795, 794, 794, 795, 795]
drnnLSTMreluMakespan32=[798, 794, 794, 794, 798, 792, 795, 795, 795, 796, 794, 795]
drnnLSTMreluMakespan33=[794, 796, 794, 794, 794, 795, 794, 794, 797, 793, 793, 795]
drnnLSTMreluMakespan34=[794, 794, 795, 794, 794, 793, 794, 795, 793, 795, 795, 794]
drnnLSTMreluMakespan35=[798, 796, 795, 794, 795, 795, 795, 795, 794, 795, 797, 795]
drnnLSTMreluMakespan36=[794, 796, 794, 794, 794, 794, 795, 795, 797, 796, 795, 795]
drnnLSTMreluMakespan37=[795, 794, 796, 795, 795, 795, 795, 794, 792, 797, 794, 793]
drnnLSTMreluMakespan38=[794, 798, 794, 792, 794, 792, 795, 797, 793, 794, 794, 797]
drnnLSTMreluMakespan39=[792, 794, 794, 794, 792, 795, 795, 795, 794, 794, 795, 794]
drnnLSTMreluMakespan40=[792, 795, 795, 792, 795, 795, 794, 795, 794, 795, 794, 795]
drnnLSTMreluMakespan41=[794, 797, 795, 794, 795, 795, 798, 794, 795, 796, 796, 794]
drnnLSTMreluMakespan42=[794, 795, 795, 795, 794, 795, 795, 794, 794, 795, 793, 795]
drnnLSTMreluMakespan43=[795, 794, 795, 794, 795, 795, 792, 794, 794, 795, 794, 795]
drnnLSTMreluMakespan44=[795, 794, 792, 795, 794, 794, 795, 794, 796, 795, 796, 794]
drnnLSTMreluMakespan45=[795, 794, 793, 794, 793, 795, 794, 794, 795, 794, 795, 794]
drnnLSTMreluMakespan46=[794, 796, 793, 794, 794, 795, 799, 795, 794, 794, 794, 794]
drnnLSTMreluMakespan47=[794, 794, 794, 794, 795, 793, 795, 795, 794, 795, 795, 795]
drnnLSTMreluMakespan48=[794, 794, 795, 794, 795, 795, 795, 794, 794, 795, 795, 794]
drnnLSTMreluMakespan49=[795, 795, 795, 794, 795, 795, 794, 795, 793, 793, 792, 792]
drnnLSTMreluRewards0=[-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.1778021978021978, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765]
drnnLSTMreluRewards1=[-0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17617264919621228]
drnnLSTMreluRewards2=[-0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.1759911894273128, -0.17725973169122497, -0.1759911894273128, -0.17725973169122497, -0.1759911894273128, -0.17617264919621228, -0.17580964970257765, -0.177078750549934, -0.17526455026455026]
drnnLSTMreluRewards3=[-0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17617264919621228, -0.17617264919621228, -0.1759911894273128, -0.17725973169122497, -0.1776214552648934, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765]
drnnLSTMreluRewards4=[-0.177078750549934, -0.177078750549934, -0.17526455026455026, -0.17544633017412387, -0.1759911894273128, -0.1768976897689769, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17544633017412387, -0.17798286090969018]
drnnLSTMreluRewards5=[-0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.17544633017412387, -0.17653532907770195, -0.17617264919621228, -0.17617264919621228, -0.177078750549934, -0.1763540290620872]
drnnLSTMreluRewards6=[-0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.177078750549934, -0.17617264919621228]
drnnLSTMreluRewards7=[-0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.1763540290620872]
drnnLSTMreluRewards8=[-0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.177078750549934, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228]
drnnLSTMreluRewards9=[-0.17617264919621228, -0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17526455026455026, -0.17617264919621228, -0.1759911894273128, -0.17526455026455026, -0.17617264919621228, -0.177078750549934]
drnnLSTMreluRewards10=[-0.177078750549934, -0.17580964970257765, -0.177078750549934, -0.17617264919621228, -0.1763540290620872, -0.1759911894273128, -0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765]
drnnLSTMreluRewards11=[-0.17580964970257765, -0.17671654929577466, -0.17617264919621228, -0.17562802996914942, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387]
drnnLSTMreluRewards12=[-0.1759911894273128, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765]
drnnLSTMreluRewards13=[-0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17617264919621228, -0.1759911894273128, -0.17544633017412387, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026]
drnnLSTMreluRewards14=[-0.17544633017412387, -0.17580964970257765, -0.17725973169122497, -0.1759911894273128, -0.1768976897689769, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17508269018743108, -0.17617264919621228]
drnnLSTMreluRewards15=[-0.17725973169122497, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17798286090969018, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228]
drnnLSTMreluRewards16=[-0.1759911894273128, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17798286090969018, -0.17544633017412387, -0.177078750549934, -0.17617264919621228, -0.17526455026455026]
drnnLSTMreluRewards17=[-0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.177078750549934, -0.1763540290620872, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765]
drnnLSTMreluRewards18=[-0.17526455026455026, -0.17617264919621228, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17508269018743108, -0.17617264919621228, -0.17562802996914942, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards19=[-0.17580964970257765, -0.17653532907770195, -0.17508269018743108, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17562802996914942, -0.17544633017412387]
drnnLSTMreluRewards20=[-0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17562802996914942, -0.17508269018743108, -0.17580964970257765]
drnnLSTMreluRewards21=[-0.1759911894273128, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards22=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards23=[-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026]
drnnLSTMreluRewards24=[-0.17508269018743108, -0.17471872931833224, -0.17471872931833224, -0.17508269018743108, -0.17544633017412387, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17544633017412387, -0.17526455026455026]
drnnLSTMreluRewards25=[-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards26=[-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards27=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1763540290620872, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards28=[-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards29=[-0.17508269018743108, -0.17471872931833224, -0.17580964970257765, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards30=[-0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17544633017412387, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards31=[-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards32=[-0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards33=[-0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.1749007498897221, -0.1749007498897221, -0.17526455026455026]
drnnLSTMreluRewards34=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards35=[-0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026]
drnnLSTMreluRewards36=[-0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards37=[-0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17562802996914942, -0.17508269018743108, -0.1749007498897221]
drnnLSTMreluRewards38=[-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17562802996914942, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942]
drnnLSTMreluRewards39=[-0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards40=[-0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards41=[-0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17544633017412387, -0.17508269018743108]
drnnLSTMreluRewards42=[-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026]
drnnLSTMreluRewards43=[-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards44=[-0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108]
drnnLSTMreluRewards45=[-0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards46=[-0.17508269018743108, -0.17544633017412387, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards47=[-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards48=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards49=[-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.1749007498897221, -0.17471872931833224, -0.17471872931833224]
# Deep Recurrent Reinforcement Learning: 1 GRU layer and 4 Dense layers, tanh activation function, 12 episodes, 50 iterations
drnnGRUtanhMakespan0 = [798, 799, 798, 804, 805, 799, 801, 801, 801, 799, 798, 796]
drnnGRUtanhMakespan1 = [800, 798, 798, 798, 798, 798, 801, 798, 795, 796, 800, 796]
drnnGRUtanhMakespan2 = [795, 804, 805, 800, 800, 796, 804, 800, 795, 798, 798, 801]
drnnGRUtanhMakespan3 = [806, 796, 794, 797, 798, 800, 800, 808, 805, 798, 800, 809]
drnnGRUtanhMakespan4 = [805, 801, 795, 798, 798, 800, 796, 796, 805, 798, 799, 798]
drnnGRUtanhMakespan5 = [804, 799, 798, 804, 796, 799, 798, 805, 796, 805, 798, 800]
drnnGRUtanhMakespan6 = [800, 799, 794, 801, 799, 796, 800, 804, 797, 796, 800, 798]
drnnGRUtanhMakespan7 = [798, 800, 810, 810, 805, 800, 795, 798, 800, 805, 799, 800]
drnnGRUtanhMakespan8 = [798, 797, 800, 800, 804, 805, 798, 798, 801, 795, 798, 809]
drnnGRUtanhMakespan9 = [803, 800, 800, 805, 805, 798, 804, 803, 805, 801, 810, 801]
drnnGRUtanhMakespan10 = [798, 799, 798, 798, 805, 804, 805, 798, 799, 798, 800, 800]
drnnGRUtanhMakespan11 = [796, 795, 805, 800, 800, 798, 795, 804, 805, 798, 800, 800]
drnnGRUtanhMakespan12 = [799, 799, 809, 800, 799, 799, 797, 805, 799, 800, 798, 795]
drnnGRUtanhMakespan13 = [805, 800, 800, 805, 800, 799, 798, 801, 798, 797, 805, 800]
drnnGRUtanhMakespan14 = [800, 798, 800, 800, 800, 804, 804, 799, 799, 800, 798, 798]
drnnGRUtanhMakespan15 = [805, 800, 795, 800, 804, 795, 800, 798, 799, 798, 800, 796]
drnnGRUtanhMakespan16 = [806, 795, 801, 799, 799, 796, 796, 794, 802, 796, 800, 802]
drnnGRUtanhMakespan17 = [796, 800, 798, 800, 794, 800, 804, 805, 798, 810, 800, 798]
drnnGRUtanhMakespan18 = [798, 800, 794, 794, 797, 798, 800, 805, 798, 798, 804, 798]
drnnGRUtanhMakespan19 = [796, 800, 806, 799, 796, 800, 798, 805, 798, 799, 797, 805]
drnnGRUtanhMakespan20 = [805, 800, 799, 796, 805, 805, 805, 794, 809, 796, 800, 797]
drnnGRUtanhMakespan21 = [798, 800, 800, 800, 798, 801, 796, 801, 801, 801, 795, 799]
drnnGRUtanhMakespan22 = [798, 801, 797, 800, 799, 795, 799, 799, 800, 801, 800, 799]
drnnGRUtanhMakespan23 = [800, 798, 799, 805, 794, 800, 798, 796, 796, 804, 800, 794]
drnnGRUtanhMakespan24 = [800, 800, 798, 805, 804, 799, 798, 801, 800, 798, 798, 798]
drnnGRUtanhMakespan25 = [798, 798, 798, 795, 800, 803, 798, 798, 800, 799, 796, 798]
drnnGRUtanhMakespan26 = [796, 798, 798, 798, 805, 796, 798, 798, 805, 795, 801, 796]
drnnGRUtanhMakespan27 = [794, 796, 796, 800, 800, 798, 800, 798, 802, 798, 797, 798]
drnnGRUtanhMakespan28 = [799, 799, 800, 800, 798, 802, 799, 798, 795, 795, 794, 798]
drnnGRUtanhMakespan29 = [798, 796, 796, 797, 796, 798, 800, 800, 796, 798, 800, 795]
drnnGRUtanhMakespan30 = [799, 798, 795, 795, 800, 795, 798, 798, 799, 798, 805, 799]
drnnGRUtanhMakespan31 = [795, 799, 794, 794, 796, 795, 795, 794, 798, 797, 798, 795]
drnnGRUtanhMakespan32 = [797, 798, 795, 796, 798, 795, 797, 798, 795, 794, 795, 796]
drnnGRUtanhMakespan33 = [799, 795, 794, 794, 798, 795, 798, 797, 800, 796, 795, 794]
drnnGRUtanhMakespan34 = [798, 795, 798, 796, 798, 794, 796, 798, 798, 798, 796, 797]
drnnGRUtanhMakespan35 = [795, 798, 796, 798, 794, 801, 795, 800, 795, 800, 794, 800]
drnnGRUtanhMakespan36 = [798, 799, 796, 797, 795, 794, 800, 795, 795, 794, 795, 795]
drnnGRUtanhMakespan37 = [799, 798, 795, 795, 794, 795, 795, 796, 805, 795, 798, 796]
drnnGRUtanhMakespan38 = [798, 794, 795, 795, 795, 796, 795, 796, 800, 798, 797, 796]
drnnGRUtanhMakespan39 = [794, 795, 795, 797, 795, 795, 794, 794, 798, 795, 794, 798]
drnnGRUtanhMakespan40 = [795, 795, 795, 795, 795, 795, 794, 794, 793, 797, 794, 795]
drnnGRUtanhMakespan41 = [794, 794, 795, 793, 795, 795, 792, 794, 795, 794, 794, 794]
drnnGRUtanhMakespan42 = [795, 795, 795, 796, 794, 797, 795, 795, 792, 795, 796, 793]
drnnGRUtanhMakespan43 = [794, 795, 795, 794, 795, 794, 798, 794, 797, 795, 794, 794]
drnnGRUtanhMakespan44 = [795, 795, 793, 794, 795, 794, 795, 795, 794, 794, 795, 794]
drnnGRUtanhMakespan45 = [794, 794, 794, 794, 794, 794, 795, 794, 794, 794, 796, 795]
drnnGRUtanhMakespan46 = [795, 794, 795, 794, 794, 794, 793, 794, 795, 795, 794, 797]
drnnGRUtanhMakespan47 = [794, 794, 794, 794, 795, 794, 795, 792, 794, 795, 794, 794]
drnnGRUtanhMakespan48 = [795, 794, 794, 794, 795, 798, 794, 794, 794, 795, 794, 794]
drnnGRUtanhMakespan49 = [795, 795, 794, 795, 793, 795, 796, 794, 795, 794, 794, 797]
drnnGRUtanhRewards0 = [-0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.1768976897689769, -0.177078750549934, -0.1759911894273128, -0.1763540290620872, -0.1763540290620872, -0.1763540290620872, -0.1759911894273128, -0.17580964970257765, -0.17544633017412387]
drnnGRUtanhRewards1 = [-0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872, -0.17580964970257765, -0.17526455026455026, -0.17544633017412387, -0.17617264919621228, -0.17544633017412387]
drnnGRUtanhRewards2 = [-0.17526455026455026, -0.1768976897689769, -0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.1768976897689769, -0.17617264919621228, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872]
drnnGRUtanhRewards3 = [-0.17725973169122497, -0.17544633017412387, -0.17508269018743108, -0.17562802996914942, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.1776214552648934, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.1778021978021978]
drnnGRUtanhRewards4 = [-0.177078750549934, -0.1763540290620872, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765]
drnnGRUtanhRewards5 = [-0.1768976897689769, -0.1759911894273128, -0.17580964970257765, -0.1768976897689769, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.177078750549934, -0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.17617264919621228]
drnnGRUtanhRewards6 = [-0.17617264919621228, -0.1759911894273128, -0.17508269018743108, -0.1763540290620872, -0.1759911894273128, -0.17544633017412387, -0.17617264919621228, -0.1768976897689769, -0.17562802996914942, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765]
drnnGRUtanhRewards7 = [-0.17580964970257765, -0.17617264919621228, -0.17798286090969018, -0.177078750549934, -0.17798286090969018, -0.17617264919621228, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.1759911894273128, -0.17617264919621228]
drnnGRUtanhRewards8 = [-0.17580964970257765, -0.17562802996914942, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.1763540290620872, -0.17580964970257765, -0.1778021978021978]
drnnGRUtanhRewards9 = [-0.17671654929577466, -0.17617264919621228, -0.17617264919621228, -0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.1768976897689769, -0.17671654929577466, -0.177078750549934, -0.1763540290620872, -0.17798286090969018, -0.1763540290620872]
drnnGRUtanhRewards10 = [-0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.1768976897689769, -0.177078750549934, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228]
drnnGRUtanhRewards11 = [-0.17544633017412387, -0.17526455026455026, -0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026, -0.1768976897689769, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228]
drnnGRUtanhRewards12 = [-0.1759911894273128, -0.1759911894273128, -0.1778021978021978, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.17562802996914942, -0.177078750549934, -0.1759911894273128, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026]
drnnGRUtanhRewards13 = [-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.177078750549934, -0.17617264919621228, -0.1759911894273128, -0.17580964970257765, -0.1763540290620872, -0.17580964970257765, -0.17562802996914942, -0.177078750549934, -0.17617264919621228]
drnnGRUtanhRewards14 = [-0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.1768976897689769, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765]
drnnGRUtanhRewards15 = [-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17526455026455026, -0.1768976897689769, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17544633017412387]
drnnGRUtanhRewards16 = [-0.17725973169122497, -0.17526455026455026, -0.1763540290620872, -0.1759911894273128, -0.1759911894273128, -0.17544633017412387, -0.17544633017412387, -0.17508269018743108, -0.17653532907770195, -0.17544633017412387, -0.17617264919621228, -0.17653532907770195]
drnnGRUtanhRewards17 = [-0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108, -0.1768976897689769, -0.177078750549934, -0.17580964970257765, -0.17798286090969018, -0.17617264919621228, -0.17580964970257765]
drnnGRUtanhRewards18 = [-0.17580964970257765, -0.17617264919621228, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.1768976897689769, -0.17580964970257765]
drnnGRUtanhRewards19 = [-0.17544633017412387, -0.17617264919621228, -0.17725973169122497, -0.1759911894273128, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17562802996914942, -0.1759911894273128, -0.177078750549934]
drnnGRUtanhRewards20 = [-0.17617264919621228, -0.177078750549934, -0.1759911894273128, -0.17544633017412387, -0.177078750549934, -0.177078750549934, -0.177078750549934, -0.17508269018743108, -0.1778021978021978, -0.17544633017412387, -0.17617264919621228, -0.17562802996914942]
drnnGRUtanhRewards21 = [-0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.1763540290620872, -0.17544633017412387, -0.1763540290620872, -0.1763540290620872, -0.1763540290620872, -0.17526455026455026, -0.1759911894273128]
drnnGRUtanhRewards22 = [-0.17580964970257765, -0.1763540290620872, -0.17562802996914942, -0.17617264919621228, -0.1759911894273128, -0.17526455026455026, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.1763540290620872, -0.17617264919621228, -0.1759911894273128]
drnnGRUtanhRewards23 = [-0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.177078750549934, -0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.17544633017412387, -0.17544633017412387, -0.1768976897689769, -0.17617264919621228, -0.17508269018743108]
drnnGRUtanhRewards24 = [-0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.177078750549934, -0.1768976897689769, -0.17580964970257765, -0.1763540290620872, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765]
drnnGRUtanhRewards25 = [-0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.17671654929577466, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.17544633017412387, -0.17580964970257765]
drnnGRUtanhRewards26 = [-0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17526455026455026, -0.1763540290620872, -0.17544633017412387]
drnnGRUtanhRewards27 = [-0.17508269018743108, -0.17544633017412387, -0.17544633017412387, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17653532907770195, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765]
drnnGRUtanhRewards28 = [-0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17653532907770195, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765]
drnnGRUtanhRewards29 = [-0.17580964970257765, -0.17544633017412387, -0.17544633017412387, -0.17562802996914942, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17526455026455026]
drnnGRUtanhRewards30 = [-0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17526455026455026, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.1759911894273128]
drnnGRUtanhRewards31 = [-0.17526455026455026, -0.1759911894273128, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17526455026455026]
drnnGRUtanhRewards32 = [-0.17562802996914942, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026]
drnnGRUtanhRewards33 = [-0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17562802996914942, -0.17617264919621228, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108]
drnnGRUtanhRewards34 = [-0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17508269018743108, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17562802996914942]
drnnGRUtanhRewards35 = [-0.17526455026455026, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17508269018743108, -0.1763540290620872, -0.17526455026455026, -0.17617264919621228, -0.17526455026455026, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228]
drnnGRUtanhRewards36 = [-0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnGRUtanhRewards37 = [-0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.177078750549934, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387]
drnnGRUtanhRewards38 = [-0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.17562802996914942, -0.17544633017412387]
drnnGRUtanhRewards39 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765]
drnnGRUtanhRewards40 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026]
drnnGRUtanhRewards41 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnGRUtanhRewards42 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17544633017412387, -0.1749007498897221]
drnnGRUtanhRewards43 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnGRUtanhRewards44 = [-0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnGRUtanhRewards45 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026]
drnnGRUtanhRewards46 = [-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942]
drnnGRUtanhRewards47 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnGRUtanhRewards48 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnGRUtanhRewards49 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942]
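# Note (added): the reward lists in this file are numerically consistent with
# reward = -makespan / (makespan + 3741); e.g. a makespan of 800 maps to
# -800/4541 = -0.17617264919621228 and 794 maps to -794/4535 = -0.17508269018743108.
# This relationship is inferred from the data, not stated by the original experiments.
# A minimal sketch under that assumption (the helper name and the offset constant are
# illustrative, not taken from the original code):
def makespans_to_rewards(makespans, offset=3741):
    """Map each makespan m to -m / (m + offset) (assumed reward definition)."""
    return [-m / (m + offset) for m in makespans]

# Hypothetical usage: makespans_to_rewards(drnnGRUtanhMakespan0) reproduces
# drnnGRUtanhRewards0 element by element (up to floating-point printing precision).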
# Deep Recurrent Reinforcement Learning: 1 GRU layer and 4 Dense layers, relu activation function, 12 episodes, 50 iterations
drnnGRUreluMakespan0 = [800, 799, 798, 797, 798, 800, 800, 796, 800, 794, 800, 800]
drnnGRUreluMakespan1 = [798, 800, 805, 795, 799, 808, 795, 800, 796, 798, 799, 798]
drnnGRUreluMakespan2 = [799, 800, 806, 800, 800, 805, 805, 798, 799, 807, 800, 800]
drnnGRUreluMakespan3 = [798, 795, 799, 800, 800, 796, 798, 800, 800, 804, 805, 800]
drnnGRUreluMakespan4 = [811, 800, 799, 800, 805, 798, 798, 799, 796, 804, 805, 804]
drnnGRUreluMakespan5 = [799, 795, 797, 800, 798, 800, 800, 798, 800, 797, 800, 798]
drnnGRUreluMakespan6 = [798, 800, 798, 799, 797, 798, 800, 796, 801, 799, 795, 798]
drnnGRUreluMakespan7 = [800, 804, 795, 801, 796, 806, 805, 798, 800, 799, 799, 804]
drnnGRUreluMakespan8 = [800, 799, 799, 800, 805, 796, 800, 800, 810, 796, 800, 798]
drnnGRUreluMakespan9 = [794, 800, 799, 805, 800, 800, 798, 798, 796, 795, 798, 796]
drnnGRUreluMakespan10 = [798, 800, 798, 801, 795, 802, 796, 809, 800, 800, 798, 795]
drnnGRUreluMakespan11 = [804, 800, 799, 799, 798, 803, 798, 798, 805, 803, 800, 796]
drnnGRUreluMakespan12 = [800, 799, 805, 797, 798, 796, 799, 794, 799, 805, 799, 800]
drnnGRUreluMakespan13 = [796, 800, 798, 800, 795, 799, 800, 804, 800, 794, 805, 805]
drnnGRUreluMakespan14 = [800, 795, 796, 798, 798, 801, 805, 794, 800, 801, 801, 796]
drnnGRUreluMakespan15 = [798, 800, 796, 796, 798, 794, 797, 800, 796, 801, 795, 799]
drnnGRUreluMakespan16 = [800, 805, 794, 800, 799, 800, 805, 801, 798, 800, 801, 799]
drnnGRUreluMakespan17 = [797, 803, 801, 808, 794, 799, 799, 800, 805, 796, 801, 796]
drnnGRUreluMakespan18 = [805, 800, 800, 804, 799, 798, 800, 799, 804, 796, 800, 804]
drnnGRUreluMakespan19 = [804, 798, 800, 799, 799, 799, 805, 795, 801, 799, 799, 805]
drnnGRUreluMakespan20 = [799, 804, 796, 798, 796, 798, 800, 805, 799, 810, 800, 800]
drnnGRUreluMakespan21 = [798, 799, 799, 805, 798, 798, 805, 798, 794, 799, 798, 798]
drnnGRUreluMakespan22 = [799, 798, 798, 796, 798, 805, 799, 798, 798, 799, 796, 798]
drnnGRUreluMakespan23 = [798, 805, 808, 798, 798, 805, 810, 796, 804, 799, 800, 799]
drnnGRUreluMakespan24 = [798, 796, 798, 795, 800, 798, 799, 798, 797, 805, 798, 800]
drnnGRUreluMakespan25 = [799, 796, 799, 798, 805, 798, 798, 800, 796, 794, 810, 798]
drnnGRUreluMakespan26 = [799, 798, 805, 800, 802, 798, 799, 799, 799, 794, 802, 797]
drnnGRUreluMakespan27 = [798, 800, 805, 796, 798, 795, 802, 796, 798, 800, 798, 794]
drnnGRUreluMakespan28 = [796, 805, 798, 800, 800, 798, 810, 798, 798, 798, 796, 796]
drnnGRUreluMakespan29 = [800, 798, 798, 802, 794, 798, 796, 808, 800, 800, 798, 799]
drnnGRUreluMakespan30 = [798, 796, 798, 798, 794, 798, 794, 800, 796, 794, 800, 800]
drnnGRUreluMakespan31 = [794, 802, 797, 799, 798, 800, 799, 799, 796, 796, 798, 798]
drnnGRUreluMakespan32 = [799, 798, 794, 795, 798, 805, 804, 797, 795, 800, 796, 798]
drnnGRUreluMakespan33 = [803, 799, 805, 796, 794, 798, 797, 798, 798, 794, 794, 798]
drnnGRUreluMakespan34 = [810, 796, 795, 798, 799, 798, 796, 795, 795, 797, 798, 798]
drnnGRUreluMakespan35 = [799, 799, 799, 799, 795, 798, 795, 800, 796, 795, 795, 796]
drnnGRUreluMakespan36 = [795, 797, 798, 799, 799, 799, 800, 794, 796, 795, 798, 800]
drnnGRUreluMakespan37 = [800, 798, 799, 794, 800, 796, 798, 798, 797, 800, 794, 798]
drnnGRUreluMakespan38 = [800, 799, 794, 796, 795, 800, 796, 804, 800, 795, 800, 798]
drnnGRUreluMakespan39 = [794, 798, 795, 804, 805, 799, 798, 800, 796, 798, 795, 794]
drnnGRUreluMakespan40 = [799, 798, 796, 798, 798, 799, 800, 796, 798, 798, 799, 798]
drnnGRUreluMakespan41 = [796, 798, 800, 797, 799, 796, 797, 796, 799, 804, 805, 798]
drnnGRUreluMakespan42 = [798, 794, 795, 799, 799, 798, 797, 798, 798, 798, 798, 795]
drnnGRUreluMakespan43 = [799, 798, 794, 794, 795, 794, 795, 799, 799, 800, 799, 794]
drnnGRUreluMakespan44 = [795, 796, 795, 799, 794, 795, 794, 796, 795, 794, 795, 796]
drnnGRUreluMakespan45 = [794, 797, 794, 795, 796, 795, 794, 799, 795, 794, 798, 798]
drnnGRUreluMakespan46 = [795, 795, 794, 795, 794, 794, 792, 794, 795, 797, 794, 794]
drnnGRUreluMakespan47 = [798, 796, 797, 798, 794, 798, 794, 797, 794, 803, 798, 798]
drnnGRUreluMakespan48 = [795, 794, 796, 798, 795, 794, 796, 795, 796, 794, 796, 796]
drnnGRUreluMakespan49 = [798, 798, 796, 798, 798, 796, 796, 798, 798, 798, 796, 798]
drnnGRUreluRewards0 = [-0.17617264919621228, -0.1759911894273128, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228, -0.17617264919621228]
drnnGRUreluRewards1 = [-0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17526455026455026, -0.1759911894273128, -0.1776214552648934, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765]
drnnGRUreluRewards2 = [-0.1759911894273128, -0.17617264919621228, -0.17725973169122497, -0.17617264919621228, -0.17617264919621228, -0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.1759911894273128, -0.1774406332453826, -0.17617264919621228, -0.17617264919621228]
drnnGRUreluRewards3 = [-0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.177078750549934, -0.17617264919621228]
drnnGRUreluRewards4 = [-0.1781634446397188, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.1768976897689769, -0.177078750549934, -0.1768976897689769]
drnnGRUreluRewards5 = [-0.1759911894273128, -0.17526455026455026, -0.17562802996914942, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17562802996914942, -0.17617264919621228, -0.17580964970257765]
drnnGRUreluRewards6 = [-0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17562802996914942, -0.17580964970257765, -0.17544633017412387, -0.17617264919621228, -0.1763540290620872, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765]
drnnGRUreluRewards7 = [-0.17617264919621228, -0.1768976897689769, -0.17526455026455026, -0.1763540290620872, -0.17544633017412387, -0.17725973169122497, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.1768976897689769]
drnnGRUreluRewards8 = [-0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.177078750549934, -0.17544633017412387, -0.17617264919621228, -0.17617264919621228, -0.17798286090969018, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765]
drnnGRUreluRewards9 = [-0.17508269018743108, -0.17617264919621228, -0.1759911894273128, -0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387]
drnnGRUreluRewards10 = [-0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.1763540290620872, -0.17526455026455026, -0.17653532907770195, -0.17544633017412387, -0.1778021978021978, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026]
drnnGRUreluRewards11 = [-0.1768976897689769, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.17580964970257765, -0.17671654929577466, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17671654929577466, -0.17617264919621228, -0.17544633017412387]
drnnGRUreluRewards12 = [-0.17617264919621228, -0.1759911894273128, -0.177078750549934, -0.17562802996914942, -0.17580964970257765, -0.17544633017412387, -0.1759911894273128, -0.17508269018743108, -0.1759911894273128, -0.177078750549934, -0.1759911894273128, -0.17617264919621228]
drnnGRUreluRewards13 = [-0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228, -0.1768976897689769, -0.17617264919621228, -0.17508269018743108, -0.177078750549934, -0.177078750549934]
drnnGRUreluRewards14 = [-0.17617264919621228, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872, -0.177078750549934, -0.17508269018743108, -0.17617264919621228, -0.1763540290620872, -0.1763540290620872, -0.17544633017412387]
drnnGRUreluRewards15 = [-0.17580964970257765, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17508269018743108, -0.17562802996914942, -0.17617264919621228, -0.17544633017412387, -0.1763540290620872, -0.17526455026455026, -0.1759911894273128]
drnnGRUreluRewards16 = [-0.17617264919621228, -0.177078750549934, -0.17508269018743108, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.177078750549934, -0.1763540290620872, -0.17580964970257765, -0.17617264919621228, -0.1763540290620872, -0.1759911894273128]
drnnGRUreluRewards17 = [-0.17562802996914942, -0.17671654929577466, -0.1763540290620872, -0.1776214552648934, -0.17508269018743108, -0.1759911894273128, -0.17617264919621228, -0.1759911894273128, -0.177078750549934, -0.17544633017412387, -0.1763540290620872, -0.17544633017412387]
drnnGRUreluRewards18 = [-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.1768976897689769, -0.17544633017412387, -0.17617264919621228, -0.1768976897689769]
drnnGRUreluRewards19 = [-0.1768976897689769, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.177078750549934, -0.17526455026455026, -0.1763540290620872, -0.1759911894273128, -0.1759911894273128, -0.177078750549934]
drnnGRUreluRewards20 = [-0.1759911894273128, -0.1768976897689769, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.1759911894273128, -0.17798286090969018, -0.17617264919621228, -0.17617264919621228]
drnnGRUreluRewards21 = [-0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17508269018743108, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards22 = [-0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.177078750549934, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.17580964970257765]
drnnGRUreluRewards23 = [-0.17580964970257765, -0.177078750549934, -0.1776214552648934, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17798286090969018, -0.17544633017412387, -0.1768976897689769, -0.1759911894273128, -0.17617264919621228, -0.1759911894273128]
drnnGRUreluRewards24 = [-0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17562802996914942, -0.177078750549934, -0.17580964970257765, -0.17617264919621228]
drnnGRUreluRewards25 = [-0.1759911894273128, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17544633017412387, -0.17508269018743108, -0.17798286090969018, -0.17580964970257765]
drnnGRUreluRewards26 = [-0.1759911894273128, -0.17580964970257765, -0.177078750549934, -0.17617264919621228, -0.17653532907770195, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.17508269018743108, -0.17653532907770195, -0.17562802996914942]
drnnGRUreluRewards27 = [-0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17653532907770195, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17508269018743108]
drnnGRUreluRewards28 = [-0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17798286090969018, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17544633017412387]
drnnGRUreluRewards29 = [-0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17653532907770195, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387, -0.1776214552648934, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128]
drnnGRUreluRewards30 = [-0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387, -0.17508269018743108, -0.17617264919621228, -0.17617264919621228]
drnnGRUreluRewards31 = [-0.17508269018743108, -0.17653532907770195, -0.17562802996914942, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards32 = [-0.1759911894273128, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.1768976897689769, -0.177078750549934, -0.17562802996914942, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765]
drnnGRUreluRewards33 = [-0.17671654929577466, -0.1759911894273128, -0.177078750549934, -0.17544633017412387, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765]
drnnGRUreluRewards34 = [-0.17798286090969018, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards35 = [-0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387]
drnnGRUreluRewards36 = [-0.17526455026455026, -0.17562802996914942, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228]
drnnGRUreluRewards37 = [-0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17580964970257765]
drnnGRUreluRewards38 = [-0.17617264919621228, -0.1759911894273128, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.1768976897689769, -0.17617264919621228, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765]
drnnGRUreluRewards39 = [-0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.1768976897689769, -0.177078750549934, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108]
drnnGRUreluRewards40 = [-0.1759911894273128, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765]
drnnGRUreluRewards41 = [-0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17562802996914942, -0.1759911894273128, -0.17544633017412387, -0.17562802996914942, -0.17544633017412387, -0.1759911894273128, -0.1768976897689769, -0.177078750549934, -0.17580964970257765]
drnnGRUreluRewards42 = [-0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128, -0.1759911894273128, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026]
drnnGRUreluRewards43 = [-0.1759911894273128, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.1759911894273128, -0.17508269018743108]
drnnGRUreluRewards44 = [-0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.1759911894273128, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387]
drnnGRUreluRewards45 = [-0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards46 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108]
drnnGRUreluRewards47 = [-0.17580964970257765, -0.17544633017412387, -0.17562802996914942, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17671654929577466, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards48 = [-0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17544633017412387, -0.17544633017412387]
drnnGRUreluRewards49 = [-0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765]
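# Illustrative sketch (added): a compact way to compare the configurations above is the
# mean makespan of each iteration's 12 episodes. The helper below is an assumption about
# how one might summarize these lists; it is not part of the original analysis.
def mean_makespan_per_iteration(*iteration_lists):
    """Average the episode makespans of each iteration list passed in."""
    return [sum(iteration) / len(iteration) for iteration in iteration_lists]

# Hypothetical usage (extend to all 50 iteration lists for the full learning curve):
# drnnGRUreluCurve = mean_makespan_per_iteration(drnnGRUreluMakespan0, drnnGRUreluMakespan1, drnnGRUreluMakespan2)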
# Deep Reinforcement Learning: 5 Dense layers, tanh activation function, 12 episodes, 50 iterations
drlTanhMakespan0 = [794, 794, 805, 799, 810, 800, 794, 810, 804, 806, 812, 808]
drlTanhMakespan1 = [796, 795, 795, 798, 799, 800, 800, 795, 797, 796, 797, 799]
drlTanhMakespan2 = [800, 797, 798, 801, 799, 800, 796, 795, 797, 796, 794, 798]
drlTanhMakespan3 = [800, 795, 799, 796, 799, 798, 795, 799, 795, 799, 798, 796]
drlTanhMakespan4 = [809, 795, 795, 800, 797, 795, 798, 798, 799, 799, 798, 798]
drlTanhMakespan5 = [795, 795, 795, 799, 795, 798, 795, 800, 795, 796, 795, 805]
drlTanhMakespan6 = [794, 800, 795, 793, 798, 795, 794, 798, 795, 799, 795, 796]
drlTanhMakespan7 = [795, 795, 795, 795, 798, 795, 797, 797, 795, 795, 798, 797]
drlTanhMakespan8 = [795, 795, 795, 794, 800, 800, 794, 795, 794, 794, 797, 795]
drlTanhMakespan9 = [793, 794, 796, 795, 796, 800, 794, 797, 793, 795, 798, 795]
drlTanhMakespan10 = [795, 795, 797, 794, 795, 798, 797, 795, 798, 794, 794, 794]
drlTanhMakespan11 = [795, 795, 795, 795, 797, 795, 795, 794, 795, 795, 795, 794]
drlTanhMakespan12 = [794, 798, 795, 794, 795, 795, 795, 797, 799, 795, 795, 795]
drlTanhMakespan13 = [795, 797, 795, 800, 796, 795, 796, 795, 795, 795, 798, 794]
drlTanhMakespan14 = [795, 795, 796, 794, 794, 794, 797, 795, 798, 795, 795, 793]
drlTanhMakespan15 = [799, 794, 795, 795, 795, 796, 801, 797, 795, 794, 795, 799]
drlTanhMakespan16 = [795, 795, 796, 798, 795, 795, 795, 795, 795, 798, 798, 796]
drlTanhMakespan17 = [800, 798, 795, 795, 798, 794, 795, 795, 797, 795, 796, 794]
drlTanhMakespan18 = [797, 800, 798, 797, 796, 794, 799, 797, 795, 796, 799, 798]
drlTanhMakespan19 = [797, 800, 795, 794, 794, 796, 795, 798, 796, 798, 797, 795]
drlTanhMakespan20 = [794, 795, 795, 799, 798, 797, 795, 795, 798, 795, 798, 795]
drlTanhMakespan21 = [796, 795, 795, 795, 795, 797, 798, 794, 797, 795, 796, 794]
drlTanhMakespan22 = [799, 796, 795, 795, 795, 795, 796, 795, 796, 798, 796, 795]
drlTanhMakespan23 = [799, 799, 795, 796, 796, 799, 796, 797, 794, 794, 798, 796]
drlTanhMakespan24 = [795, 795, 797, 800, 797, 795, 795, 796, 795, 795, 798, 799]
drlTanhMakespan25 = [795, 797, 795, 795, 795, 795, 800, 796, 795, 797, 795, 795]
drlTanhMakespan26 = [795, 795, 799, 794, 797, 794, 794, 798, 794, 796, 795, 798]
drlTanhMakespan27 = [796, 796, 795, 796, 798, 797, 794, 795, 794, 794, 794, 798]
drlTanhMakespan28 = [795, 795, 794, 798, 796, 796, 800, 797, 797, 796, 795, 794]
drlTanhMakespan29 = [795, 795, 798, 800, 797, 794, 796, 794, 792, 794, 794, 795]
drlTanhMakespan30 = [798, 797, 795, 799, 797, 800, 798, 799, 797, 800, 794, 796]
drlTanhMakespan31 = [794, 795, 800, 798, 800, 794, 800, 798, 799, 798, 798, 798]
drlTanhMakespan32 = [795, 795, 795, 794, 794, 794, 793, 795, 794, 793, 794, 795]
drlTanhMakespan33 = [794, 797, 792, 794, 795, 795, 797, 795, 795, 794, 792, 795]
drlTanhMakespan34 = [795, 794, 795, 798, 795, 796, 794, 795, 794, 794, 795, 794]
drlTanhMakespan35 = [796, 794, 797, 793, 794, 798, 795, 794, 793, 793, 795, 794]
drlTanhMakespan36 = [795, 795, 794, 795, 795, 795, 794, 795, 795, 793, 795, 794]
drlTanhMakespan37 = [794, 794, 798, 794, 794, 796, 795, 794, 793, 795, 795, 792]
drlTanhMakespan38 = [794, 796, 795, 794, 798, 798, 795, 795, 794, 794, 795, 794]
drlTanhMakespan39 = [794, 795, 795, 796, 792, 794, 795, 794, 795, 794, 794, 795]
drlTanhMakespan40 = [798, 795, 794, 795, 794, 794, 793, 795, 794, 794, 797, 794]
drlTanhMakespan41 = [795, 792, 795, 794, 794, 795, 794, 795, 792, 797, 795, 795]
drlTanhMakespan42 = [792, 794, 794, 795, 794, 794, 795, 794, 792, 794, 794, 794]
drlTanhMakespan43 = [794, 796, 794, 793, 795, 795, 793, 798, 794, 794, 798, 794]
drlTanhMakespan44 = [794, 794, 794, 794, 795, 794, 793, 794, 794, 795, 795, 794]
drlTanhMakespan45 = [790, 794, 793, 794, 793, 794, 795, 794, 791, 795, 795, 794]
drlTanhMakespan46 = [792, 794, 794, 794, 794, 794, 794, 793, 794, 794, 794, 794]
drlTanhMakespan47 = [794, 794, 794, 794, 794, 794, 794, 794, 792, 795, 793, 795]
drlTanhMakespan48 = [794, 794, 792, 792, 797, 794, 792, 794, 794, 795, 794, 795]
drlTanhMakespan49 = [795, 794, 794, 796, 794, 797, 794, 794, 794, 794, 794, 794]
drlTanhMakespan50 = [794, 792, 795, 794, 794, 794, 794, 794, 795, 794, 795, 794]
drlTanhMakespan51 = [794, 792, 796, 795, 794, 794, 795, 794, 795, 795, 795, 794]
drlTanhMakespan52 = [794, 794, 795, 792, 795, 795, 795, 792, 794, 793, 795, 794]
drlTanhMakespan53 = [794, 792, 794, 792, 794, 794, 794, 795, 795, 794, 794, 792]
drlTanhMakespan54 = [795, 793, 794, 794, 794, 792, 795, 794, 794, 792, 794, 796]
drlTanhMakespan55 = [795, 794, 794, 795, 795, 793, 794, 795, 794, 797, 795, 792]
drlTanhMakespan56 = [795, 795, 792, 795, 794, 795, 794, 794, 794, 795, 795, 795]
drlTanhMakespan57 = [795, 792, 795, 794, 795, 795, 792, 795, 794, 797, 792, 792]
drlTanhMakespan58 = [795, 795, 794, 795, 792, 794, 794, 794, 792, 792, 792, 793]
drlTanhMakespan59 = [795, 794, 792, 794, 794, 794, 792, 794, 794, 794, 793, 795]
drlTanhMakespan60 = [794, 795, 795, 795, 798, 794, 794, 794, 794, 794, 794, 792]
drlTanhMakespan61 = [792, 795, 794, 794, 795, 794, 792, 795, 795, 794, 794, 795]
drlTanhMakespan62 = [795, 794, 794, 794, 799, 794, 792, 794, 795, 795, 794, 793]
drlTanhMakespan63 = [791, 795, 792, 796, 794, 794, 792, 795, 793, 794, 792, 794]
drlTanhRewards0 = [-0.17508269018743108, -0.17508269018743108, -0.177078750549934, -0.1759911894273128, -0.17798286090969018, -0.17617264919621228, -0.17508269018743108, -0.17798286090969018, -0.1768976897689769, -0.17725973169122497, -0.17834394904458598, -0.1776214552648934]
drlTanhRewards1 = [-0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.17526455026455026, -0.17562802996914942, -0.17544633017412387, -0.17562802996914942, -0.1759911894273128]
drlTanhRewards2 = [-0.17617264919621228, -0.17562802996914942, -0.17580964970257765, -0.1763540290620872, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17544633017412387, -0.17580964970257765]
drlTanhRewards3 = [-0.17617264919621228, -0.1759911894273128, -0.17526455026455026, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.1759911894273128, -0.17580964970257765, -0.17544633017412387]
drlTanhRewards4 = [-0.1778021978021978, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17562802996914942, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765]
drlTanhRewards5 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.177078750549934]
drlTanhRewards6 = [-0.17508269018743108, -0.17617264919621228, -0.17526455026455026, -0.1749007498897221, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17544633017412387]
drlTanhRewards7 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942]
drlTanhRewards8 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026]
drlTanhRewards9 = [-0.1749007498897221, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17617264919621228, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.1749007498897221, -0.17580964970257765, -0.17526455026455026]
drlTanhRewards10 = [-0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards11 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards12 = [-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drlTanhRewards13 = [-0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108]
drlTanhRewards14 = [-0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.1749007498897221]
drlTanhRewards15 = [-0.1759911894273128, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17562802996914942, -0.1763540290620872, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128]
drlTanhRewards16 = [-0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387]
drlTanhRewards17 = [-0.17617264919621228, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108]
drlTanhRewards18 = [-0.17562802996914942, -0.17617264919621228, -0.17580964970257765, -0.17562802996914942, -0.17544633017412387, -0.1759911894273128, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765]
drlTanhRewards19 = [-0.17562802996914942, -0.17617264919621228, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026]
drlTanhRewards20 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.1759911894273128, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026]
drlTanhRewards21 = [-0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108]
drlTanhRewards22 = [-0.1759911894273128, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026]
drlTanhRewards23 = [-0.1759911894273128, -0.1759911894273128, -0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.1759911894273128, -0.17544633017412387, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387]
drlTanhRewards24 = [-0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17617264919621228, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.1759911894273128]
drlTanhRewards25 = [-0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drlTanhRewards26 = [-0.17526455026455026, -0.17526455026455026, -0.1759911894273128, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765]
drlTanhRewards27 = [-0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108]
drlTanhRewards28 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387, -0.17562802996914942, -0.17544633017412387, -0.17617264919621228, -0.17562802996914942, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards29 = [-0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.17562802996914942, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards30 = [-0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.1759911894273128, -0.17562802996914942, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387]
drlTanhRewards31 = [-0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765]
drlTanhRewards32 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards33 = [-0.17508269018743108, -0.17562802996914942, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026]
drlTanhRewards34 = [-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards35 = [-0.17544633017412387, -0.17508269018743108, -0.17562802996914942, -0.1749007498897221, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards36 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards37 = [-0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224]
drlTanhRewards38 = [-0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards39 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards40 = [-0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108]
drlTanhRewards41 = [-0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026]
drlTanhRewards42 = [-0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards43 = [-0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108]
drlTanhRewards44 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards45 = [-0.1749007498897221, -0.17435444714191128, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17453662842012357, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards46 = [-0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards47 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026]
drlTanhRewards48 = [-0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17471872931833224, -0.17508269018743108, -0.17562802996914942, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards49 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards50 = [-0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards51 = [-0.17508269018743108, -0.17471872931833224, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards52 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards53 = [-0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224]
drlTanhRewards54 = [-0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17544633017412387]
drlTanhRewards55 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17471872931833224]
drlTanhRewards56 = [-0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drlTanhRewards57 = [-0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17471872931833224, -0.17471872931833224]
drlTanhRewards58 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17471872931833224, -0.17471872931833224, -0.1749007498897221]
drlTanhRewards59 = [-0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026]
drlTanhRewards60 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224]
drlTanhRewards61 = [-0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards62 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1759911894273128, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221]
drlTanhRewards63 = [-0.17453662842012357, -0.17471872931833224, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108]
# Deep Reinforcement Learning: 5 Dense layers, relu activation function, 12 episodes, 50 iterations
drlReluMakespan0 = [796, 798, 809, 798, 796, 800, 798, 799, 800, 794, 800, 798]
drlReluMakespan1 = [800, 800, 801, 806, 804, 806, 808, 798, 796, 796, 798, 800]
drlReluMakespan2 = [805, 805, 798, 800, 800, 798, 801, 799, 800, 806, 800, 800]
drlReluMakespan3 = [798, 799, 798, 795, 798, 808, 803, 800, 798, 795, 799, 800]
drlReluMakespan4 = [805, 805, 799, 796, 798, 803, 799, 800, 800, 800, 795, 794]
drlReluMakespan5 = [799, 796, 795, 800, 801, 796, 800, 795, 803, 800, 800, 805]
drlReluMakespan6 = [799, 795, 798, 794, 805, 796, 795, 799, 798, 795, 804, 796]
drlReluMakespan7 = [795, 798, 799, 798, 798, 799, 795, 794, 796, 794, 795, 805]
drlReluMakespan8 = [805, 794, 794, 795, 798, 795, 798, 795, 799, 800, 796, 798]
drlReluMakespan9 = [797, 797, 797, 794, 795, 794, 794, 797, 796, 795, 801, 799]
drlReluMakespan10 = [799, 794, 797, 795, 794, 794, 795, 795, 795, 796, 797, 799]
drlReluMakespan11 = [796, 798, 800, 795, 805, 794, 798, 796, 795, 794, 798, 795]
drlReluMakespan12 = [800, 795, 794, 798, 800, 805, 800, 798, 804, 799, 794, 803]
drlReluMakespan13 = [796, 799, 798, 794, 800, 794, 795, 796, 798, 795, 794, 799]
drlReluMakespan14 = [795, 798, 798, 798, 805, 798, 798, 798, 795, 794, 800, 796]
drlReluMakespan15 = [795, 798, 795, 805, 798, 794, 795, 798, 796, 794, 795, 796]
drlReluMakespan16 = [798, 795, 796, 799, 796, 798, 798, 795, 795, 795, 795, 799]
drlReluMakespan17 = [794, 798, 796, 798, 795, 801, 794, 798, 797, 795, 796, 801]
drlReluMakespan18 = [798, 795, 798, 798, 801, 798, 795, 795, 797, 800, 794, 800]
drlReluMakespan19 = [795, 798, 794, 800, 796, 795, 798, 797, 795, 794, 796, 796]
drlReluMakespan20 = [794, 794, 795, 795, 795, 795, 796, 798, 799, 799, 799, 795]
drlReluMakespan21 = [802, 796, 794, 797, 797, 800, 794, 794, 804, 803, 798, 797]
drlReluMakespan22 = [794, 795, 795, 795, 798, 795, 794, 799, 794, 803, 795, 794]
drlReluMakespan23 = [794, 798, 799, 794, 795, 795, 799, 795, 796, 795, 797, 799]
drlReluMakespan24 = [795, 794, 797, 800, 794, 795, 795, 795, 795, 800, 800, 798]
drlReluMakespan25 = [795, 794, 797, 796, 798, 795, 795, 794, 799, 795, 794, 798]
drlReluMakespan26 = [801, 795, 800, 794, 794, 796, 800, 798, 798, 799, 794, 796]
drlReluMakespan27 = [796, 795, 796, 795, 796, 795, 795, 800, 794, 794, 794, 796]
drlReluMakespan28 = [794, 794, 795, 796, 794, 795, 795, 797, 794, 794, 796, 795]
drlReluMakespan29 = [793, 794, 795, 800, 795, 795, 794, 798, 798, 796, 795, 794]
drlReluMakespan30 = [802, 794, 794, 798, 794, 796, 805, 794, 800, 794, 796, 794]
drlReluMakespan31 = [797, 794, 794, 794, 800, 800, 794, 794, 798, 795, 794, 798]
drlReluMakespan32 = [794, 798, 794, 795, 794, 795, 798, 794, 794, 795, 794, 798]
drlReluMakespan33 = [798, 794, 798, 795, 794, 793, 797, 798, 794, 794, 801, 793]
drlReluMakespan34 = [794, 798, 794, 795, 794, 793, 798, 795, 794, 800, 794, 795]
drlReluMakespan35 = [794, 796, 794, 796, 806, 795, 795, 795, 796, 795, 795, 799]
drlReluMakespan36 = [795, 794, 794, 796, 796, 798, 794, 796, 794, 795, 794, 795]
drlReluMakespan37 = [795, 794, 795, 798, 794, 794, 794, 794, 794, 794, 795, 797]
drlReluMakespan38 = [794, 798, 794, 798, 797, 794, 794, 795, 795, 794, 795, 795]
drlReluMakespan39 = [797, 794, 795, 796, 796, 796, 798, 794, 794, 795, 794, 798]
drlReluMakespan40 = [798, 795, 795, 798, 792, 795, 795, 794, 795, 794, 798, 794]
drlReluMakespan41 = [795, 794, 794, 794, 794, 794, 798, 793, 794, 794, 794, 793]
drlReluMakespan42 = [794, 794, 794, 794, 799, 794, 795, 794, 796, 794, 794, 794]
drlReluMakespan43 = [794, 797, 795, 794, 795, 794, 794, 795, 794, 794, 793, 794]
drlReluMakespan44 = [794, 792, 793, 794, 794, 796, 794, 798, 795, 794, 794, 796]
drlReluMakespan45 = [795, 794, 799, 794, 794, 793, 794, 795, 795, 793, 796, 794]
drlReluMakespan46 = [794, 796, 794, 794, 794, 794, 794, 793, 799, 792, 794, 794]
drlReluMakespan47 = [795, 794, 793, 794, 796, 797, 794, 794, 795, 794, 794, 794]
drlReluMakespan48 = [794, 794, 794, 792, 794, 794, 795, 794, 794, 794, 794, 794]
drlReluMakespan49 = [794, 794, 795, 792, 797, 797, 794, 794, 792, 800, 795, 795]
drlReluRewards0 = [-0.17544633017412387, -0.17580964970257765, -0.1778021978021978, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17508269018743108, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765]
drlReluRewards1 = [-0.17617264919621228, -0.17617264919621228, -0.1763540290620872, -0.17725973169122497, -0.1768976897689769, -0.17725973169122497, -0.1776214552648934, -0.17580964970257765, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228]
drlReluRewards2 = [-0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.1763540290620872, -0.1759911894273128, -0.17617264919621228, -0.17725973169122497, -0.17617264919621228, -0.17617264919621228]
drlReluRewards3 = [-0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.1776214552648934, -0.17671654929577466, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228]
drlReluRewards4 = [-0.177078750549934, -0.177078750549934, -0.1759911894273128, -0.17544633017412387, -0.17580964970257765, -0.17671654929577466, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17526455026455026, -0.17508269018743108]
drlReluRewards5 = [-0.1759911894273128, -0.17544633017412387, -0.17526455026455026, -0.17617264919621228, -0.1763540290620872, -0.17544633017412387, -0.17526455026455026, -0.17617264919621228, -0.17671654929577466, -0.17617264919621228, -0.17617264919621228, -0.177078750549934]
drlReluRewards6 = [-0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.177078750549934, -0.17544633017412387, -0.17526455026455026, -0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.1768976897689769, -0.17544633017412387]
drlReluRewards7 = [-0.17526455026455026, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.177078750549934]
drlReluRewards8 = [-0.177078750549934, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765]
drlReluRewards9 = [-0.17562802996914942, -0.17562802996914942, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17544633017412387, -0.17526455026455026, -0.1763540290620872, -0.1759911894273128]
drlReluRewards10 = [-0.1759911894273128, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17562802996914942, -0.1759911894273128]
drlReluRewards11 = [-0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17526455026455026, -0.177078750549934, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026]
drlReluRewards12 = [-0.17617264919621228, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17617264919621228, -0.17580964970257765, -0.1768976897689769, -0.1759911894273128, -0.17508269018743108, -0.17671654929577466]
drlReluRewards13 = [-0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128]
drlReluRewards14 = [-0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387]
drlReluRewards15 = [-0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.177078750549934, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387]
drlReluRewards16 = [-0.17580964970257765, -0.17526455026455026, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.1759911894273128]
drlReluRewards17 = [-0.17508269018743108, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.1763540290620872, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.17544633017412387, -0.1763540290620872]
drlReluRewards18 = [-0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228]
drlReluRewards19 = [-0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17544633017412387]
drlReluRewards20 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.17526455026455026]
drlReluRewards21 = [-0.17653532907770195, -0.17544633017412387, -0.17562802996914942, -0.17508269018743108, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.1768976897689769, -0.17671654929577466, -0.17562802996914942]
drlReluRewards22 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17508269018743108, -0.17671654929577466, -0.17526455026455026, -0.17508269018743108]
drlReluRewards23 = [-0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17562802996914942, -0.1759911894273128]
drlReluRewards24 = [-0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765]
drlReluRewards25 = [-0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765]
drlReluRewards26 = [-0.1763540290620872, -0.17526455026455026, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17508269018743108, -0.17544633017412387]
drlReluRewards27 = [-0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387]
drlReluRewards28 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026]
drlReluRewards29 = [-0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108]
drlReluRewards30 = [-0.17653532907770195, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17544633017412387, -0.177078750549934, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108]
drlReluRewards31 = [-0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765]
drlReluRewards32 = [-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765]
drlReluRewards33 = [-0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17562802996914942, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.1763540290620872, -0.1749007498897221]
drlReluRewards34 = [-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17526455026455026]
drlReluRewards35 = [-0.17508269018743108, -0.17544633017412387, -0.17725973169122497, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.1759911894273128]
drlReluRewards36 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drlReluRewards37 = [-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942]
drlReluRewards38 = [-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drlReluRewards39 = [-0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765]
drlReluRewards40 = [-0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108]
drlReluRewards41 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221]
drlReluRewards42 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1759911894273128, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlReluRewards43 = [-0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108]
drlReluRewards44 = [-0.17508269018743108, -0.17471872931833224, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387]
drlReluRewards45 = [-0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17544633017412387, -0.17508269018743108]
drlReluRewards46 = [-0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.1759911894273128, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108]
drlReluRewards47 = [-0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17544633017412387, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlReluRewards48 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlReluRewards49 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17562802996914942, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17617264919621228, -0.17526455026455026, -0.17526455026455026]
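# Illustrative sketch only (not the original experiment code): a Q/policy network with
# 5 Dense layers and relu activations, matching the configuration described above the
# drlRelu data. Layer widths, optimizer, loss and the state/action sizes are assumptions.
def build_drl_relu_network(state_size, action_size, hidden_units=64):
    # Deferred import so this results module stays importable without TensorFlow installed.
    from tensorflow import keras
    model = keras.Sequential()
    # First hidden Dense layer takes the flattened scheduling state as input.
    model.add(keras.layers.Dense(hidden_units, activation="relu", input_shape=(state_size,)))
    # Three further hidden Dense layers with relu activation.
    for _ in range(3):
        model.add(keras.layers.Dense(hidden_units, activation="relu"))
    # Output Dense layer: one (linear) score per schedulable action.
    model.add(keras.layers.Dense(action_size))
    model.compile(optimizer="adam", loss="mse")
    return model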
if __name__ == "__main__":
##############################################
##############################################
##############################################
# Deep Recurrent Reinforcement Learning with 1 GRU layer and 4 Dense layers
drnnGRUtanhMakespan = []
drnnGRUtanhRewards = []
drnnGRUtanhMakespanList = []
drnnGRUtanhRewardsList = []
drnnGRUtanhMakespanValues = []
drnnGRUtanhRewardsValues = []
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan0))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan1))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan2))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan3))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan4))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan5))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan6))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan7))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan8))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan9))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan10))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan11))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan12))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan13))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan14))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan15))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan16))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan17))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan18))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan19))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan20))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan21))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan22))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan23))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan24))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan25))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan26))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan27))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan28))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan29))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan30))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan31))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan32))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan33))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan34))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan35))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan36))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan37))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan38))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan39))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan40))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan41))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan42))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan43))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan44))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan45))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan46))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan47))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan48))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan49))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards0))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards1))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards2))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards3))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards4))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards5))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards6))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards7))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards8))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards9))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards10))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards11))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards12))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards13))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards14))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards15))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards16))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards17))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards18))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards19))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards20))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards21))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards22))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards23))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards24))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards25))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards26))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards27))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards28))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards29))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards30))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards31))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards32))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards33))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards34))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards35))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards36))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards37))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards38))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards39))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards40))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards41))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards42))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards43))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards44))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards45))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards46))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards47))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards48))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards49))
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan0)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan1)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan2)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan3)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan4)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan5)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan6)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan7)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan8)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan9)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan10)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan11)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan12)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan13)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan14)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan15)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan16)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan17)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan18)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan19)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan20)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan21)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan22)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan23)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan24)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan25)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan26)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan27)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan28)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan29)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan30)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan31)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan32)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan33)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan34)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan35)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan36)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan37)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan38)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan39)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan40)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan41)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan42)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan43)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan44)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan45)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan46)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan47)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan48)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan49)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards0)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards1)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards2)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards3)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards4)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards5)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards6)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards7)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards8)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards9)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards10)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards11)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards12)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards13)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards14)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards15)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards16)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards17)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards18)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards19)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards20)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards21)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards22)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards23)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards24)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards25)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards26)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards27)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards28)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards29)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards30)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards31)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards32)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards33)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards34)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards35)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards36)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards37)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards38)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards39)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards40)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards41)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards42)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards43)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards44)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards45)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards46)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards47)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards48)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards49)
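    # Note: the per-run lists follow a strict "<name><index>" naming convention, so the
    # repeated append calls above (and in the analogous blocks below) could equally be
    # generated programmatically; an illustrative sketch:
    #     drnnGRUtanhMakespan = [np.mean(globals()[f"drnnGRUtanhMakespan{i}"]) for i in range(50)]
    #     drnnGRUtanhRewards = [np.mean(globals()[f"drnnGRUtanhRewards{i}"]) for i in range(50)]
    #     drnnGRUtanhMakespanList = [globals()[f"drnnGRUtanhMakespan{i}"] for i in range(50)]
    #     drnnGRUtanhRewardsList = [globals()[f"drnnGRUtanhRewards{i}"] for i in range(50)]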
drnnGRUreluMakespan = []
drnnGRUreluRewards = []
drnnGRUreluMakespanList = []
drnnGRUreluRewardsList = []
drnnGRUreluMakespanValues = []
drnnGRUreluRewardsValues = []
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan0))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan1))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan2))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan3))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan4))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan5))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan6))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan7))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan8))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan9))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan10))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan11))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan12))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan13))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan14))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan15))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan16))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan17))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan18))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan19))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan20))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan21))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan22))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan23))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan24))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan25))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan26))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan27))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan28))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan29))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan30))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan31))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan32))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan33))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan34))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan35))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan36))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan37))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan38))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan39))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan40))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan41))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan42))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan43))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan44))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan45))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan46))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan47))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan48))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan49))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards0))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards1))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards2))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards3))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards4))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards5))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards6))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards7))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards8))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards9))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards10))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards11))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards12))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards13))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards14))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards15))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards16))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards17))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards18))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards19))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards20))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards21))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards22))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards23))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards24))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards25))
    drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards26))
"""Definitions of ground truth NSRTs for all environments."""
from typing import List, Sequence, Set, cast
import itertools
import numpy as np
from predicators.src.envs import create_env, BlocksEnv, PaintingEnv, \
PlayroomEnv, BehaviorEnv
from predicators.src.structs import NSRT, Predicate, State, GroundAtom, \
ParameterizedOption, Variable, Type, LiftedAtom, Object, Array
from predicators.src.settings import CFG
from predicators.src.envs.behavior_options import navigate_to_param_sampler, \
grasp_obj_param_sampler, place_ontop_obj_pos_sampler
from predicators.src.envs import get_cached_env_instance, ToolsEnv
from predicators.src.utils import null_sampler
def get_gt_nsrts(predicates: Set[Predicate],
options: Set[ParameterizedOption]) -> Set[NSRT]:
"""Create ground truth NSRTs for an env."""
if CFG.env in ("cover", "cover_hierarchical_types", "cover_typed_options",
"cover_regrasp", "cover_multistep_options",
"cover_multistep_options_fixed_tasks"):
nsrts = _get_cover_gt_nsrts()
elif CFG.env == "cluttered_table":
nsrts = _get_cluttered_table_gt_nsrts()
elif CFG.env == "cluttered_table_place":
nsrts = _get_cluttered_table_gt_nsrts(with_place=True)
elif CFG.env == "blocks":
nsrts = _get_blocks_gt_nsrts()
elif CFG.env == "behavior":
nsrts = _get_behavior_gt_nsrts() # pragma: no cover
elif CFG.env == "painting":
nsrts = _get_painting_gt_nsrts()
elif CFG.env == "tools":
nsrts = _get_tools_gt_nsrts()
elif CFG.env == "playroom":
nsrts = _get_playroom_gt_nsrts()
elif CFG.env == "repeated_nextto":
nsrts = _get_repeated_nextto_gt_nsrts()
else:
raise NotImplementedError("Ground truth NSRTs not implemented")
# Filter out excluded predicates from NSRTs, and filter out NSRTs whose
# options are excluded.
final_nsrts = set()
for nsrt in nsrts:
if nsrt.option not in options:
continue
nsrt = nsrt.filter_predicates(predicates)
final_nsrts.add(nsrt)
return final_nsrts
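# Illustrative usage (an assumption, not part of this module): with an environment in
# scope, the ground-truth operators for its full predicate/option sets could be built as
#     env = create_env(CFG.env)
#     nsrts = get_gt_nsrts(env.predicates, env.options)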
def _get_from_env_by_names(env_name: str, names: Sequence[str],
env_attr: str) -> List:
"""Helper for loading types, predicates, and options by name."""
env = create_env(env_name)
name_to_env_obj = {}
for o in getattr(env, env_attr):
name_to_env_obj[o.name] = o
assert set(name_to_env_obj).issuperset(set(names))
return [name_to_env_obj[name] for name in names]
def _get_types_by_names(env_name: str, names: Sequence[str]) -> List[Type]:
"""Load types from an env given their names."""
return _get_from_env_by_names(env_name, names, "types")
def _get_predicates_by_names(env_name: str,
names: Sequence[str]) -> List[Predicate]:
"""Load predicates from an env given their names."""
return _get_from_env_by_names(env_name, names, "predicates")
def _get_options_by_names(env_name: str,
names: Sequence[str]) -> List[ParameterizedOption]:
"""Load parameterized options from an env given their names."""
return _get_from_env_by_names(env_name, names, "options")
def _get_cover_gt_nsrts() -> Set[NSRT]:
"""Create ground truth NSRTs for CoverEnv or environments that inherit from
CoverEnv."""
# Types
block_type, target_type, robot_type = _get_types_by_names(
CFG.env, ["block", "target", "robot"])
# Objects
block = Variable("?block", block_type)
robot = Variable("?robot", robot_type)
target = Variable("?target", target_type)
# Predicates
IsBlock, IsTarget, Covers, HandEmpty, Holding = \
_get_predicates_by_names(CFG.env, ["IsBlock", "IsTarget", "Covers",
"HandEmpty", "Holding"])
# Options
if CFG.env in ("cover", "cover_hierarchical_types", "cover_regrasp"):
PickPlace, = _get_options_by_names(CFG.env, ["PickPlace"])
elif CFG.env in ("cover_typed_options", "cover_multistep_options",
"cover_multistep_options_fixed_tasks"):
Pick, Place = _get_options_by_names(CFG.env, ["Pick", "Place"])
if CFG.env in ("cover_multistep_options",
"cover_multistep_options_fixed_tasks") and \
CFG.cover_multistep_use_learned_equivalents:
LearnedEquivalentPick, LearnedEquivalentPlace = _get_options_by_names(
CFG.env, ["LearnedEquivalentPick", "LearnedEquivalentPlace"])
nsrts = set()
# Pick
parameters = [block]
holding_predicate_args = [block]
if CFG.env in ("cover_multistep_options",
"cover_multistep_options_fixed_tasks"):
parameters.append(robot)
holding_predicate_args.append(robot)
preconditions = {LiftedAtom(IsBlock, [block]), LiftedAtom(HandEmpty, [])}
add_effects = {LiftedAtom(Holding, holding_predicate_args)}
delete_effects = {LiftedAtom(HandEmpty, [])}
if CFG.env in ("cover", "cover_hierarchical_types", "cover_regrasp"):
option = PickPlace
option_vars = []
elif CFG.env in ("cover_multistep_options",
"cover_multistep_options_fixed_tasks") and \
CFG.cover_multistep_use_learned_equivalents:
option = LearnedEquivalentPick
option_vars = [block, robot]
elif CFG.env in ("cover_typed_options", "cover_multistep_options",
"cover_multistep_options_fixed_tasks"):
option = Pick
option_vars = [block]
if CFG.env in ("cover_multistep_options",
"cover_multistep_options_fixed_tasks") and \
CFG.cover_multistep_use_learned_equivalents:
def pick_sampler(state: State, goal: Set[GroundAtom],
rng: np.random.Generator,
objs: Sequence[Object]) -> Array:
# The only things that change are the block's grasp, and the
# robot's grip, holding, x, and y.
del goal # unused
assert len(objs) == 2
block, robot = objs
assert block.is_instance(block_type)
assert robot.is_instance(robot_type)
bx, by = state.get(block, "x"), state.get(block, "y")
rx, ry = state.get(robot, "x"), state.get(robot, "y")
bw = state.get(block, "width")
if CFG.cover_multistep_degenerate_oracle_samplers:
desired_x = float(bx)
else:
desired_x = rng.uniform(bx - bw / 2, bx + bw / 2)
# is_block, is_target, width, x, grasp, y, height
# grasp changes from -1.0 to 1.0
block_param = [0.0, 0.0, 0.0, 0.0, 2.0, 0.0, 0.0]
# x, y, grip, holding
# grip changes from -1.0 to 1.0
# holding changes from -1.0 to 1.0
robot_param = [desired_x - rx, by - ry, 2.0, 2.0]
param = block_param + robot_param
            return np.array(param, dtype=np.float32)
#!/usr/bin/env python3
import gym
import gym_bfw
import time
import argparse
import numpy as np
import torch
from lib import wrappers
from lib import dqn_model
import collections
DEFAULT_ENV_NAME = "Bfw-v0"
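# Note: play_round below is a single greedy-step helper. It is currently unused (its call
# further down is commented out), relies on the module-level `env`, and does not return
# the locally updated `state`/`total_reward` to the caller.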
def play_round(net, state, counter, total_reward):
state_v = torch.tensor(np.array([state], copy=False))
q_vals = net(state_v).data.numpy()[0]
action = np.argmax(q_vals)
counter[action] += 1
state, reward, done, _ = env.step(action)
total_reward += reward
if done:
return True
return False
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-m1", "--model1", required=True, help="Model player 1 file to load")
parser.add_argument("-m2", "--model2", required=True, help="Model player 2 file to load")
parser.add_argument("-e", "--env", default=DEFAULT_ENV_NAME,
help="Environment name to use, default=" + DEFAULT_ENV_NAME)
args = parser.parse_args()
env = wrappers.make_env(args.env, gui=True, scenario="multi_side_ai")
net1 = dqn_model.DQN(env.observation_space.shape, env.action_space.n)
net2 = dqn_model.DQN(env.observation_space.shape, env.action_space.n)
net1.load_state_dict(torch.load(args.model1, map_location=lambda storage, loc: storage))
net2.load_state_dict(torch.load(args.model2, map_location=lambda storage, loc: storage))
state1 = env.reset()
state2 = state1
total_reward1 = 0.0
total_reward2 = 0.0
counter1 = collections.Counter()
counter2 = collections.Counter()
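    # Small exploration rate, presumably so the two fixed policies do not replay a single
    # deterministic line of play during evaluation.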
epsilon = 0.2
frame = 0
while True:
if frame % 2 == 0:
if np.random.random() < epsilon:
action = env.action_space.sample()
else:
state_v = torch.tensor(np.array([state1], copy=False))
q_vals = net1(state_v).data.numpy()[0]
action = np.argmax(q_vals)
counter1[action] += 1
state1, reward, done, _ = env.step((0, action))
total_reward1 += reward
if done:
break
# if play_round(net1, state1, c1, total_reward1):
# break
else:
            # Player 2's turn: mirrors the player-1 branch above, using net2 and state2
            # (epsilon-greedy action choice; assumes the same (player_index, action) step API).
            if np.random.random() < epsilon:
                action = env.action_space.sample()
            else:
                state_v = torch.tensor(np.array([state2], copy=False))
                q_vals = net2(state_v).data.numpy()[0]
                action = np.argmax(q_vals)
            counter2[action] += 1
            state2, reward, done, _ = env.step((1, action))
            total_reward2 += reward
            if done:
                break
"""
The pycity_scheduling framework
Copyright (C) 2022,
Institute for Automation of Complex Power Systems (ACS),
E.ON Energy Research Center (E.ON ERC),
RWTH Aachen University
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import numpy as np
import unittest
import datetime
import logging
import warnings
import pyomo.environ as pyomo
from pyomo.opt import TerminationCondition
from shapely.geometry import Point
from pycity_scheduling import constants, solvers
from pycity_scheduling.classes import *
from pycity_scheduling.util.metric import *
class TestModule(unittest.TestCase):
def test_filter_entities(self):
e = get_env(4, 8)
bd = Building(e)
bes = BuildingEnergySystem(e)
pv = Photovoltaic(e, 0)
bes.addDevice(pv)
bd.addEntity(bes)
def do_test(gen):
entities = list(gen)
self.assertEqual(1, len(entities))
self.assertIn(pv, entities)
do_test(filter_entities(bd.get_entities(), 'PV'))
do_test(filter_entities(bd, 'generation_devices'))
do_test(filter_entities(bd, [Photovoltaic]))
do_test(filter_entities(bd, ['PV']))
do_test(filter_entities(bd, {'PV': Photovoltaic}))
with self.assertRaises(ValueError):
next(filter_entities(bd, 'PPV'))
with self.assertRaises(ValueError):
next(filter_entities(bd, [int]))
with self.assertRaises(ValueError):
next(filter_entities(bd, None))
return
class TestBattery(unittest.TestCase):
def setUp(self):
e = get_env(3)
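        # Battery under test: presumably 10 kWh capacity and 20 kW power limit (positional
        # arguments), with an initial SOC of 87.5 % and a charging efficiency of 0.5.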
self.bat = Battery(e, 10, 20, soc_init=0.875, eta=0.5)
return
def test_populate_model(self):
model = pyomo.ConcreteModel()
self.bat.populate_model(model)
model.c1 = pyomo.Constraint(expr=self.bat.model.e_el_vars[2] == 10)
model.c2 = pyomo.Constraint(expr=self.bat.model.e_el_vars[0] == 5)
obj = pyomo.sum_product(self.bat.model.p_el_demand_vars, self.bat.model.p_el_demand_vars)
model.o = pyomo.Objective(expr=obj)
result = solve_model(model)
# TODO stats are currently not correct due to a pyomo bug
# use result as a workaround
#model.compute_statistics()
#stats = model.statistics
#self.assertEqual(12, stats.number_of_variables)
self.assertEqual(13, result.Problem[0].number_of_variables)
var_sum = pyomo.value(pyomo.quicksum(self.bat.model.p_el_vars[t] for t in range(1, 3)))
self.assertAlmostEqual(40, var_sum, places=5)
var_sum = pyomo.value(pyomo.quicksum(
self.bat.model.p_el_supply_vars[t] + self.bat.model.p_el_demand_vars[t] for t in range(1, 3)
))
self.assertAlmostEqual(40, var_sum, places=5)
return
def test_update_model(self):
model = pyomo.ConcreteModel()
self.bat.populate_model(model)
demand_var = self.bat.model.p_el_vars
self.bat.update_model()
model.c1 = pyomo.Constraint(expr=self.bat.model.e_el_vars[0] == 10)
obj = pyomo.sum_product(demand_var, demand_var)
model.o = pyomo.Objective(expr=obj)
solve_model(model)
self.assertAlmostEqual(10, pyomo.value(demand_var[0]), places=5)
return
def test_update_schedule(self):
model = pyomo.ConcreteModel()
self.bat.populate_model(model)
self.bat.update_model()
self.bat.model.p_el_demand_vars.setlb(3.0)
self.bat.model.p_el_demand_vars.setub(3.0)
self.bat.model.p_el_supply_vars.setlb(0.0)
self.bat.model.p_el_supply_vars.setub(0.0)
obj = pyomo.sum_product(self.bat.model.p_el_demand_vars, self.bat.model.p_el_demand_vars)
model.o = pyomo.Objective(expr=obj)
solve_model(model)
self.bat.update_schedule()
assert_equal_array(self.bat.p_el_schedule, [3] * 3)
assert_equal_array(self.bat.e_el_schedule, 0.875 * 10 + np.arange(1, 4)*3*0.25*0.5)
return
def test_calculate_co2(self):
self.bat.p_el_schedule = np.array([10]*3)
self.assertEqual(0, calculate_co2(self.bat))
return
def test_get_objective(self):
model = pyomo.ConcreteModel()
self.bat.populate_model(model)
obj = self.bat.get_objective(2)
vs = list(pyomo.current.identify_variables(obj))
for t in range(3):
self.assertIn(self.bat.model.p_el_vars[t], vs)
self.bat.model.p_el_vars[t] = t * 5
self.assertEqual(3, len(vs))
self.assertEqual(sum(2*(5*t)**2 for t in range(3)), pyomo.value(obj))
return
def test_e_ini(self):
expected_schedule = list(range(4, 21, 2))
e = get_env(3, 9, 2)
model = pyomo.ConcreteModel()
bat = Battery(e, 20, 10, soc_init=0.1, eta=0.8)
bat.populate_model(model)
model.o = pyomo.Objective(expr=-bat.model.e_el_vars[2])
for t in range(4):
bat.update_model()
solve_model(model)
bat.update_schedule()
e.timer.mpc_update()
assert_equal_array(bat.e_el_schedule, expected_schedule[:3+t*2] + [0] * 2 * (3-t))
assert_equal_array(bat.p_el_schedule, [10] * (3 + t * 2) + [0] * 2 * (3 - t))
assert_equal_array(bat.p_el_demand_schedule, [10] * (3 + t * 2) + [0] * 2 * (3 - t))
assert_equal_array(bat.p_el_supply_schedule, [0] * 9)
return
def test_no_discharge(self):
e = get_env(9, 9)
model = pyomo.ConcreteModel()
bat = Battery(e, 30, 10, p_el_max_discharge=0, soc_init=0.5, eta=1)
bat.populate_model(model)
bat.update_model()
model.o = pyomo.Objective(expr=pyomo.sum_product(bat.model.p_el_vars))
solve_model(model)
bat.update_schedule()
assert_equal_array(bat.e_el_schedule, [15] * 9)
assert_equal_array(bat.p_el_schedule, [0] * 9)
assert_equal_array(bat.p_el_demand_schedule, [0] * 9)
assert_equal_array(bat.p_el_supply_schedule, [0] * 9)
return
class TestBoiler(unittest.TestCase):
def setUp(self):
e = get_env(4, 8)
self.bl = Boiler(e, 10, 0.4)
return
def test_calculate_co2(self):
self.bl.p_th_heat_schedule = - np.array([10] * 8)
self.bl.p_th_heat_ref_schedule = - np.array([4] * 8)
co2_em = np.array([1111]*8)
co2 = calculate_co2(self.bl, co2_emissions=co2_em)
self.assertEqual(50.0*constants.CO2_EMISSIONS_GAS, co2)
co2 = calculate_co2(self.bl, timestep=4, co2_emissions=co2_em)
self.assertEqual(25.0*constants.CO2_EMISSIONS_GAS, co2)
self.bl.load_schedule("ref")
co2 = calculate_co2(self.bl, co2_emissions=co2_em)
self.assertEqual(20.0*constants.CO2_EMISSIONS_GAS, co2)
return
def test_lower_activation(self):
e = get_env(4, 8)
bl = Boiler(e, 10, lower_activation_limit=0.5)
model = pyomo.ConcreteModel()
bl.populate_model(model, "integer")
bl.update_model("integer")
model.o = pyomo.Objective(expr=bl.model.p_th_heat_vars[0])
results = solve_model(model)
self.assertEqual(TerminationCondition.optimal, results.solver.termination_condition)
bl.model.p_th_heat_vars[0].setub(-0.1)
bl.model.p_th_heat_vars[0].setlb(-4.9)
logger = logging.getLogger("pyomo.core")
oldlevel = logger.level
logger.setLevel(logging.ERROR)
results = solve_model(model)
logger.setLevel(oldlevel)
self.assertEqual(TerminationCondition.infeasible, results.solver.termination_condition)
return
def test_objective(self):
model = pyomo.ConcreteModel()
self.bl.populate_model(model)
self.bl.get_objective()
return
class TestBuilding(unittest.TestCase):
def setUp(self):
e = get_env(4, 8)
self.bd = Building(e)
return
def test_get_objective(self):
model = pyomo.ConcreteModel()
env = self.bd.environment
env.prices.tou_prices[:4] = [1, 2, 3, 4]
env.prices.co2_prices[:4] = [5, 4, 3, 2]
bes = BuildingEnergySystem(env)
self.bd.addEntity(bes)
self.bd.populate_model(model)
obj = self.bd.get_objective(2)
vs = list(pyomo.current.identify_variables(obj))
self.assertEqual(4, len(vs))
for t in range(4):
self.bd.model.p_el_vars[t].value = 10**t
self.assertAlmostEqual(2*4321/10*4, pyomo.value(obj), places=5)
model = pyomo.ConcreteModel()
bd2 = Building(env, 'co2')
bd2.addEntity(bes)
bd2.populate_model(model)
obj = bd2.get_objective(2)
vs = list(pyomo.current.identify_variables(obj))
self.assertEqual(4, len(vs))
for t in range(4):
bd2.model.p_el_vars[t].value = 10**t
# numerical errors caused by /14 and co2_prices being np.float32
self.assertAlmostEqual(2*2345/14*4, pyomo.value(obj), places=3)
model = pyomo.ConcreteModel()
bd3 = Building(env, 'peak-shaving')
bd3.addEntity(bes)
bd3.populate_model(model)
obj = bd3.get_objective(2)
vs = list(pyomo.current.identify_variables(obj))
self.assertEqual(4, len(vs))
for t in range(4):
bd3.model.p_el_vars[t].value = 10**t
self.assertEqual(2*1010101, pyomo.value(obj))
model = pyomo.ConcreteModel()
bd4 = Building(env, None)
bd4.addEntity(bes)
bd4.populate_model(model)
obj = bd4.get_objective(2)
vs = list(pyomo.current.identify_variables(obj))
self.assertEqual(0, len(vs))
for t in range(4):
bd4.model.p_el_vars[t].value = 10 ** t
self.assertEqual(0, pyomo.value(obj))
bd5 = Building(env, "invalid")
self.assertRaisesRegex(ValueError, ".*Building.*", bd5.get_objective)
return
def test_calculate_co2(self):
bes = BuildingEnergySystem(self.bd.environment)
pv = Photovoltaic(self.bd.environment, 0)
bes.addDevice(pv)
self.bd.addEntity(bes)
self.bd.p_el_schedule = np.array([-5] * 2 + [5] * 4 + [-5] * 2)
self.bd.p_el_ref_schedule = np.array([-2] * 2 + [2] * 4 + [-2] * 2)
pv.p_el_schedule = - np.array([10]*8)
pv.p_el_ref_schedule = - np.array([4]*8)
co2_em = np.array([100]*4 + [400]*4)
co2 = calculate_co2(self.bd, co2_emissions=co2_em)
self.assertEqual(20.0*constants.CO2_EMISSIONS_PV+1250.0, co2)
co2 = calculate_co2(self.bd, timestep=4, co2_emissions=co2_em)
self.assertEqual(10.0*constants.CO2_EMISSIONS_PV+250.0, co2)
self.bd.load_schedule("ref")
co2 = calculate_co2(self.bd, co2_emissions=co2_em)
self.assertEqual(8.0*constants.CO2_EMISSIONS_PV+500.0, co2)
return
def test_robustness(self):
model = pyomo.ConcreteModel()
env = self.bd.environment
bes = BuildingEnergySystem(env)
self.bd.addEntity(bes)
ths1 = ThermalHeatingStorage(env, 10)
bes.addDevice(ths1)
ths2 = ThermalHeatingStorage(env, 25)
bes.addDevice(ths2)
ap = Apartment(env)
self.bd.addEntity(ap)
loadcurve = np.array([15, 15, 10, 10])
sh = SpaceHeating(env, loadcurve=loadcurve)
ap.addEntity(sh)
eh = ElectricalHeater(env, 20)
bes.addDevice(eh)
self.bd.populate_model(model, robustness=(3, 0.5))
self.bd.update_model(robustness=(3, 0.5))
assert_equal_array(np.array([self.bd.model.lower_robustness_bounds[i].value for i in range(3)]),
np.cumsum(loadcurve[:3])*0.5/4)
assert_equal_array(np.array([self.bd.model.upper_robustness_bounds[i].value for i in range(3)]),
35 - np.cumsum(loadcurve[:3]) * 0.5 / 4)
self.assertEqual(17.5, self.bd.model.lower_robustness_bounds[3].value)
self.assertEqual(17.5, self.bd.model.upper_robustness_bounds[3].value)
return
def testReset(self):
env = self.bd.environment
bes = BuildingEnergySystem(env)
self.bd.addEntity(bes)
schedules = list(self.bd.schedules.keys())
model = pyomo.ConcreteModel()
self.bd.populate_model(model)
self.bd.update_model()
model.o = pyomo.Objective(expr=pyomo.sum_product(self.bd.model.p_el_vars))
solve_model(model)
self.assertEqual(schedules, list(self.bd.schedules.keys()))
self.bd.update_schedule()
self.assertEqual(schedules, list(self.bd.schedules.keys()))
self.bd.schedules["ref"]["p_el"] = np.arange(8)
self.bd.copy_schedule("new", "ref")
schedules.append("new")
self.bd.reset("ref")
for k in schedules:
if k == "new":
e = np.arange(8)
else:
e = np.zeros(8)
assert_equal_array(self.bd.schedules[k]["p_el"], e)
self.bd.reset()
for k in schedules:
assert_equal_array(self.bd.schedules[k]["p_el"], np.zeros(8))
self.assertEqual(schedules, list(self.bd.schedules.keys()))
with self.assertRaises(KeyError):
self.bd.load_schedule("nonexistent")
self.bd.p_el_schedule
with self.assertRaises(KeyError):
self.bd.load_schedule(None)
self.bd.p_el_schedule
return
class TestChiller(unittest.TestCase):
def setUp(self):
e = get_env(4, 8)
self.ch = Chiller(e, 10, cop=np.full(8, 11))
return
def test_update_model(self):
m = pyomo.ConcreteModel()
self.ch.populate_model(m)
self.ch.update_model()
c = self.ch.model.p_coupl_constr[0]
f, l = pyomo.current.decompose_term(c.body)
self.assertTrue(f)
for coeff, value in l:
if value is self.ch.model.p_el_vars[0]:
self.assertEqual(11, coeff)
if value is self.ch.model.p_th_cool_vars[0]:
self.assertEqual(1, coeff)
if value is None:
self.assertEqual(0, coeff)
return
def test_lower_activation(self):
e = get_env(4, 8)
ch = Chiller(e, 10, cop=np.full(8, 11), lower_activation_limit=0.5)
m = pyomo.ConcreteModel()
ch.populate_model(m, "integer")
ch.update_model("integer")
obj = pyomo.sum_product(ch.model.p_th_cool_vars, ch.model.p_th_cool_vars)
obj += 2 * 3 * pyomo.sum_product(ch.model.p_th_cool_vars)
m.o = pyomo.Objective(expr=obj)
solve_model(m)
ch.update_schedule()
assert_equal_array(ch.p_th_cool_schedule[:4], [-5] * 4)
return
class TestCurtailableLoad(unittest.TestCase):
combinations = [(4, 1), (3, 1), (2, 1), (1, 1),
(1, 3), (1, 4), (2, 2), (2, 3),
(0, 1), (0, 2), (0, 3), (0, 4)]
horizon = 5
def setUp(self):
self.e = get_env(5, 20)
return
def test_populate_model(self):
model = pyomo.ConcreteModel()
cl = CurtailableLoad(self.e, 2, 0.5)
cl.populate_model(model)
obj = pyomo.sum_product(cl.model.p_el_vars)
model.o = pyomo.Objective(expr=obj)
solve_model(model)
cl.update_schedule()
self.assertAlmostEqual(5, pyomo.value(obj))
        self.assertAlmostEqual(5, sum(cl.p_el_schedule[:5]))
return
def test_populate_model_on_off(self):
model = pyomo.ConcreteModel()
cl = CurtailableLoad(self.e, 2, 0.5, 2, 2)
cl.populate_model(model)
obj = pyomo.sum_product(cl.model.p_el_vars)
model.o = pyomo.Objective(expr=obj)
solve_model(model)
cl.update_schedule()
self.assertAlmostEqual(7, pyomo.value(obj))
self.assertAlmostEqual(7, sum(cl.p_el_schedule[:5]))
return
def test_populate_model_integer(self):
for low, full in self.combinations:
min_states = sum(np.tile([False]*low + [True]*full, 5)[:5])
for nom in [0.5, 1, 2]:
with self.subTest(msg="max_low={} min_full={} nom={}".format(low, full, nom)):
model = pyomo.ConcreteModel()
cl = CurtailableLoad(self.e, nom, 0.75, low, full)
cl.populate_model(model, mode="integer")
obj = pyomo.sum_product(cl.model.p_el_vars)
model.o = pyomo.Objective(expr=obj)
results = solve_model(model)
cl.update_schedule()
schedule_states = np.isclose(cl.p_el_schedule[:5], [nom]*5)
assert_equal_array(cl.p_state_schedule[:5], schedule_states)
self.assertEqual(min_states, sum(schedule_states))
self.assertAlmostEqual(min_states*nom+(5-min_states)*nom*0.75, pyomo.value(obj))
return
def test_update_model(self):
for width in [1, 2, 4, 5]:
with self.subTest(msg="step width={}".format(width)):
model = pyomo.ConcreteModel()
cl = CurtailableLoad(self.e, 2, 0.5)
cl.populate_model(model)
obj = pyomo.sum_product(cl.model.p_el_vars)
model.o = pyomo.Objective(expr=obj)
solve_model(model)
for t in range(0, 20-5+1, width):
self.e.timer.current_timestep = t
cl.update_model()
solve_model(model)
cl.update_schedule()
self.assertAlmostEqual(5, pyomo.value(obj))
self.assertAlmostEqual(5, sum(cl.p_el_schedule[t:t+5]))
return
def test_update_model_on_off(self):
for low, full in self.combinations:
for width in [1, 2, 4, 5]:
with self.subTest(msg="max_low={} min_full={} step width={}".format(low, full, width)):
model = pyomo.ConcreteModel()
cl = CurtailableLoad(self.e, 2, 0.5, low, full)
cl.populate_model(model)
obj = pyomo.sum_product(cl.model.p_el_vars)
model.o = pyomo.Objective(expr=obj)
solve_model(model)
for t in range(0, 20-5+1, width):
self.e.timer.current_timestep = t
cl.update_model()
solve_model(model)
cl.update_schedule()
endtimestep = self.e.timer.current_timestep + cl.op_horizon
for t in range(0, endtimestep):
self.assertGreaterEqual(cl.p_el_schedule[t], 1)
self.assertLessEqual(cl.p_el_schedule[t], 2)
for t in range(0, endtimestep-(low+full)+1):
self.assertGreaterEqual(sum(cl.p_el_schedule[t:t+low+full]) + 1e-4, 1*low + 2*full)
return
def test_update_model_integer(self):
for low, full in self.combinations:
states = np.tile([False] * low + [True] * full, 20)[:20]
for width in [1, 2, 4, 5]:
with self.subTest(msg="max_low={} min_full={} step width={}".format(low, full, width)):
model = pyomo.ConcreteModel()
cl = CurtailableLoad(self.e, 2, 0.5, low, full)
cl.populate_model(model, mode="integer")
obj = pyomo.sum_product(cl.model.p_el_vars)
for t in range(0, 20-5+1, width):
self.e.timer.current_timestep = t
cl.update_model(mode="integer")
model.o = pyomo.Objective(expr=obj)
results = solve_model(model)
self.assertEqual(results.solver.termination_condition, TerminationCondition.optimal)
best_obj = pyomo.value(obj)
model.o_constr = pyomo.Constraint(expr=best_obj == obj)
model.del_component("o")
model.o = pyomo.Objective(expr=pyomo.sum_product(range(0, -cl.op_horizon, -1),
cl.model.p_el_vars))
results = solve_model(model)
model.del_component("o")
model.del_component("o_constr")
self.assertEqual(results.solver.termination_condition, TerminationCondition.optimal)
cl.update_schedule()
schedule_states_el = np.isclose(cl.p_el_schedule[t:t+5], [2] * 5)
schedule_states_b = np.isclose(cl.p_state_schedule[t:t+5], [1] * 5)
assert_equal_array(schedule_states_b, states[t:t + 5])
assert_equal_array(schedule_states_el, schedule_states_b)
assert_equal_array(
cl.p_el_schedule[t:t+5],
np.full(5, 2 * 0.5) + np.array(states[t:t+5]) * (2 * (1. - 0.5))
)
return
def test_integer_first(self):
for low, full in self.combinations:
if low > 0:
with self.subTest(msg="max_low={} min_full={}".format(low, full)):
model = pyomo.ConcreteModel()
cl = CurtailableLoad(self.e, 2, 0.5, low, full)
cl.populate_model(model, mode="integer")
self.e.timer.current_timestep = 1
cl.p_state_schedule[0] = False
cl.p_el_schedule[0] = 1
cl.update_model("integer")
cl.model.p_state_vars[0].setub(1.0)
cl.model.p_state_vars[0].setlb(1.0)
cl.model.p_state_vars[1].setub(0.0)
cl.model.p_state_vars[1].setlb(0.0)
model.o = pyomo.Objective(expr=cl.model.p_state_vars[0])
logger = logging.getLogger("pyomo.core")
oldlevel = logger.level
logger.setLevel(logging.ERROR)
results = solve_model(model)
logger.setLevel(oldlevel)
if full > 1:
self.assertEqual(results.solver.termination_condition, TerminationCondition.infeasible)
else:
self.assertEqual(results.solver.termination_condition, TerminationCondition.optimal)
return
def test_small_horizon(self):
for width in [1, 2, 4]:
for horizon in [1, 2, 4]:
if horizon >= width:
with self.subTest(msg="width={} horizon={}".format(width, horizon)):
e = get_env(horizon, 20)
model = pyomo.ConcreteModel()
cl = CurtailableLoad(e, 2, 0.5)
cl.populate_model(model)
obj = pyomo.sum_product(cl.model.p_el_vars)
model.o = pyomo.Objective(expr=obj)
for t in range(0, 21 - horizon, width):
e.timer.current_timestep = t
cl.update_model()
solve_model(model)
self.assertEqual(1, pyomo.value(cl.model.p_el_vars[0]))
cl.update_schedule()
assert_equal_array(cl.p_el_schedule, [1] * 20)
return
def test_small_horizon_low_full(self):
for horizon in [1, 2, 4]:
e = get_env(horizon, 20)
for width in [1, 2, 4]:
if horizon >= width:
for low, full in self.combinations:
with self.subTest(msg="width={} horizon={} max_low={} min_full={}"
.format(width, horizon, low, full)):
model = pyomo.ConcreteModel()
cl = CurtailableLoad(e, 2, 0.5, low, full)
cl.populate_model(model)
obj = pyomo.sum_product(cl.model.p_el_vars)
model.c = pyomo.Objective(expr=obj)
for t in range(0, 21 - horizon, width):
e.timer.current_timestep = t
cl.update_model()
solve_model(model)
cl.update_schedule()
for t in range(0, 20 - (low + full) + 1):
self.assertGreaterEqual(sum(cl.p_el_schedule[t:t + low + full]) + 1e-4,
1 * low + 2 * full,
np.array2string(cl.p_el_schedule))
return
def test_small_horizon_low_full_integer(self):
for horizon in [1, 2, 4]:
e = get_env(horizon, 20)
for width in [1, 2, 4]:
if horizon >= width:
for low, full in self.combinations:
with self.subTest(msg="width={} horizon={} max_low={} min_full={}".format(width, horizon, low, full)):
states = np.tile([1] * low + [2] * full, 20)[:20]
model = pyomo.ConcreteModel()
cl = CurtailableLoad(e, 2, 0.5, low, full)
cl.populate_model(model, mode="integer")
obj = pyomo.sum_product(cl.model.p_el_vars)
for t in range(0, 21 - horizon, width):
e.timer.current_timestep = t
cl.update_model(mode="integer")
model.o = pyomo.Objective(expr=obj)
results = solve_model(model)
self.assertEqual(results.solver.termination_condition, TerminationCondition.optimal)
best_obj = pyomo.value(obj)
model.o_constr = pyomo.Constraint(expr=best_obj == obj)
model.del_component("o")
model.o = pyomo.Objective(expr=pyomo.sum_product(range(-1, -cl.op_horizon-1, -1),
cl.model.p_el_vars))
results = solve_model(model)
model.del_component("o")
model.del_component("o_constr")
self.assertEqual(results.solver.termination_condition, TerminationCondition.optimal)
cl.update_schedule()
assert_equal_array(cl.p_el_schedule, states)
return
class TestCityDistrict(unittest.TestCase):
def setUp(self):
e = get_env(4, 8)
self.cd = CityDistrict(e)
return
def test_get_objective(self):
m = pyomo.ConcreteModel()
self.cd.populate_model(m)
def zero_constr(model, t):
return model.p_el_vars[t] == 0
self.cd.model.extra_constr = pyomo.Constraint(self.cd.model.t, rule=zero_constr)
m.o = pyomo.Objective(expr=self.cd.get_objective())
solve_model(m)
for t in range(4):
self.cd.model.p_el_vars[t].value = t
self.assertEqual(self.cd.objective, "price")
self.cd.environment.prices.da_prices = np.array([1]*2 + [4]*6)
self.assertAlmostEqual(8.4, pyomo.value(self.cd.get_objective()))
self.cd.objective = 'peak-shaving'
self.assertAlmostEqual(14, pyomo.value(self.cd.get_objective()))
self.cd.objective = 'valley-filling'
self.cd.valley_profile = np.array([-1]*8)
self.assertAlmostEqual(2, pyomo.value(self.cd.get_objective()))
self.cd.objective = None
self.assertAlmostEqual(0, pyomo.value(self.cd.get_objective()))
self.cd.objective = "invalid"
self.assertRaisesRegex(ValueError, ".*CityDistrict.*", self.cd.get_objective)
m = pyomo.ConcreteModel()
self.cd.objective = "max-consumption"
self.cd.populate_model(m)
self.cd.model.p_el_vars[0].setub(-1)
m.o = pyomo.Objective(expr=self.cd.get_objective())
solve_model(m)
self.assertAlmostEqual(1, pyomo.value(self.cd.get_objective()))
return
def test_calculate_costs(self):
self.cd.p_el_schedule = np.array([10]*4 + [-20]*4)
self.cd.p_el_ref_schedule = np.array([4]*4 + [-4]*4)
prices = np.array([10]*4 + [20]*4)
costs = calculate_costs(self.cd, prices=prices, feedin_factor=0.5)
self.assertEqual(-100, costs)
costs = calculate_costs(self.cd, timestep=4, prices=prices)
self.assertEqual(100, costs)
self.cd.load_schedule("ref")
costs = calculate_costs(self.cd, prices=prices)
self.assertEqual(-40, costs)
return
def test_calculate_co2(self):
pv = Photovoltaic(self.cd.environment, 0)
self.cd.addEntity(pv, Point(0, 0))
self.cd.p_el_schedule = np.array([-5] * 2 + [5] * 4 + [-5] * 2)
self.cd.p_el_ref_schedule = np.array([-2] * 2 + [2] * 4 + [-2] * 2)
pv.p_el_schedule = - np.array([10] * 8)
pv.p_el_ref_schedule = - np.array([4] * 8)
co2_em = np.array([100] * 4 + [400] * 4)
co2 = calculate_co2(self.cd, co2_emissions=co2_em)
self.assertEqual(20.0*constants.CO2_EMISSIONS_PV+1250.0, co2)
co2 = calculate_co2(self.cd, timestep=4, co2_emissions=co2_em)
self.assertEqual(10.0*constants.CO2_EMISSIONS_PV+250.0, co2)
self.cd.load_schedule("ref")
co2 = calculate_co2(self.cd, co2_emissions=co2_em)
self.assertEqual(8.0*constants.CO2_EMISSIONS_PV+500.0, co2)
return
def test_self_consumption(self):
pv = Photovoltaic(self.cd.environment, 0)
self.cd.addEntity(pv, Point(0, 0))
        self.cd.p_el_schedule = np.array([4]*2 + [-4]*2 + [-10]*2 + [-2]*2)
#!/usr/bin/python3.6
#-*-coding:utf-8-*-
'''
@file: da.py, deterministic annealing algorithm
@Author: <NAME> (<EMAIL>)
@Date: 11/28/2019
@Paper reference: Clustering with Capacity and Size Constraints: A Deterministic Approach
'''
import numpy as np
import matplotlib.pyplot as plt
from copy import deepcopy
import collections
import random
from scipy.spatial.distance import cdist
import os
import sys
path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(path)
import base
class DeterministicAnnealing(base.Base):
def __init__(self, n_clusters, distribution, max_iters=1000,
distance_func=cdist, random_state=42, T=None):
'''
Args:
n_clusters (int): number of clusters
distribution (list): a list of ratio distribution for each cluster
T (list): inverse choice of beta coefficients
'''
super(DeterministicAnnealing, self).__init__(n_clusters, max_iters, distance_func)
self.lamb = distribution
assert np.sum(distribution) == 1
assert len(distribution) == n_clusters
assert isinstance(T, list) or T is None
self.beta = None
self.T = T
self.cluster_centers_ = None
self.labels_ = None
self._eta = None
self._demands_prob = None
random.seed(random_state)
np.random.seed(random_state)
def fit(self, X, demands_prob=None):
# setting T, loop
T = [1, 0.1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7, 1e-8]
solutions = []
diff_list = []
is_early_terminated = False
n_samples, n_features = X.shape
self.capacity = [n_samples * d for d in self.lamb]
if demands_prob is None:
demands_prob = np.ones((n_samples, 1))
else:
demands_prob = np.asarray(demands_prob).reshape((-1, 1))
assert demands_prob.shape[0] == X.shape[0]
demands_prob = demands_prob / sum(demands_prob)
for t in T:
self.T = t
centers = self.initial_centers(X)
eta = self.lamb
labels = None
for _ in range(self.max_iters):
self.beta = 1. / self.T
distance_matrix = self.distance_func(X, centers)
eta = self.update_eta(eta, demands_prob, distance_matrix)
gibbs = self.update_gibbs(eta, distance_matrix)
centers = self.update_centers(demands_prob, gibbs, X)
self.T *= 0.999
labels = np.argmax(gibbs, axis=1)
if self._is_satisfied(labels): break
solutions.append([labels, centers])
resultant_clusters = len(collections.Counter(labels))
diff_list.append(abs(resultant_clusters - self.n_clusters))
if resultant_clusters == self.n_clusters:
is_early_terminated = True
break
# modification for non-strictly satisfaction, only works for one demand per location
# labels = self.modify(labels, centers, distance_matrix)
if not is_early_terminated:
best_index = np.argmin(diff_list)
labels, centers = solutions[best_index]
self.cluster_centers_ = centers
self.labels_ = labels
self._eta = eta
self._demands_prob = demands_prob
def predict(self, X):
distance_matrix = self.distance_func(X, self.cluster_centers_)
eta = self.update_eta(self._eta, self._demands_prob, distance_matrix)
gibbs = self.update_gibbs(eta, distance_matrix)
labels = np.argmax(gibbs, axis=1)
return labels
def modify(self, labels, centers, distance_matrix):
centers_distance = self.distance_func(centers, centers)
adjacent_centers = {i: np.argsort(centers_distance, axis=1)[i, 1:3].tolist() for i in range(self.n_clusters)}
while not self._is_satisfied(labels):
count = collections.Counter(labels)
cluster_id_list = list(count.keys())
random.shuffle(cluster_id_list)
for cluster_id in cluster_id_list:
num_points = count[cluster_id]
diff = num_points - self.capacity[cluster_id]
if diff <= 0:
continue
adjacent_cluster = None
adjacent_cluster = random.choice(adjacent_centers[cluster_id])
if adjacent_cluster is None:
continue
cluster_point_id = np.where(labels==cluster_id)[0].tolist()
diff_distance = distance_matrix[cluster_point_id, adjacent_cluster] \
- distance_matrix[cluster_point_id, cluster_id]
remove_point_id = np.asarray(cluster_point_id)[np.argsort(diff_distance)[:diff]]
labels[remove_point_id] = adjacent_cluster
return labels
def initial_centers(self, X):
selective_centers = random.sample(range(X.shape[0]), self.n_clusters)
centers = X[selective_centers]
return centers
def _is_satisfied(self, labels):
count = collections.Counter(labels)
for cluster_id in range(len(self.capacity)):
if cluster_id not in count:
return False
num_points = count[cluster_id]
if num_points > self.capacity[cluster_id]:
return False
return True
def update_eta(self, eta, demands_prob, distance_matrix):
n_points, n_centers = distance_matrix.shape
eta_repmat = np.tile(np.asarray(eta).reshape(1, -1), (n_points, 1))
exp_term = np.exp(- self.beta * distance_matrix)
divider = exp_term / np.sum(np.multiply(exp_term,
eta_repmat), axis=1).reshape((-1, 1))
eta = np.divide(np.asarray(self.lamb),
np.sum(divider * demands_prob, axis=0))
return eta
def update_gibbs(self, eta, distance_matrix):
n_points, n_centers = distance_matrix.shape
eta_repmat = np.tile(np.asarray(eta).reshape(1, -1), (n_points, 1))
exp_term = np.exp(- self.beta * distance_matrix)
factor = np.multiply(exp_term, eta_repmat)
        gibbs = factor / np.sum(factor, axis=1).reshape((-1, 1))  # normalize row-wise so each point's assignment probabilities sum to 1
        return gibbs
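
# Usage sketch (hedged: the synthetic 2-D data and the uniform size distribution
# below are illustrative assumptions, not values taken from the paper):
#
#     X = np.random.rand(200, 2)
#     da = DeterministicAnnealing(n_clusters=4, distribution=[0.25] * 4)
#     da.fit(X)
#     labels, centers = da.labels_, da.cluster_centers_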
"""
This file implements different metric calculation functions for a given conversation.
"""
# Note: some imports are defined inside the methods of the metrics (for cases where only some metrics are computed)
import os
import string
from pathlib import Path
import numpy as np
import torch
from nltk.corpus import stopwords
from metric_helpers import cosine_similarity, _get_sentiment_multiplier
stopwords = stopwords.words('english')
question_words = {'who', 'what', 'why', 'where', 'how', 'when'}
_ = [stopwords.remove(q) for q in question_words]
punct = list(string.punctuation)
contractions = ["'s", "'d", "'ld", "n't", "'re", "'ll", "'ve"]
filters = set(stopwords + contractions + punct)
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
def question(conversation):
"""Counts whether each utterance in the given conversation contains a question (yes: 1, no: 0)"""
num_turns = len(conversation)
is_question_in_utterance = np.zeros(num_turns)
for i, utterance in enumerate(conversation):
if any(question_word in utterance for question_word in question_words) and '?' in utterance:
is_question_in_utterance[i] = 1
return is_question_in_utterance
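
# Worked example (the toy conversation is illustrative): a conversation is a list
# of utterances, each utterance a list of lowercased tokens, e.g.
#
#     conv = [['how', 'are', 'you', '?'], ['i', 'am', 'fine', '.']]
#     question(conv)  # -> array([1., 0.]): only the first turn asks a question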
def conversation_repetition(conversation):
"""Counts the number of distinct words in the current utterance that were also in any of the previous utterances"""
num_turns = len(conversation)
num_repeats_in_utterances = np.zeros(num_turns)
filtered = [set(utterance).difference(filters) for utterance in conversation] # filter stopwords, contractions and punctuation
for i in range(1, num_turns):
current = filtered[i]
prev = set.union(*filtered[:i])
repeats = current.intersection(prev)
num_repeats_in_utterances[i] = len(repeats)
return num_repeats_in_utterances
def self_repetition(conversation): # called 'reward_conversation_repetition' (for bot utterances) in original paper
"""
Counts the number of distinct words in the current utterance that were also in any of the previous utterances of
the current speaker (assuming two-speaker multi-turn dialog)
"""
num_turns = len(conversation)
num_repeats_in_utterances = np.zeros(num_turns)
filtered = [set(utterance).difference(filters) for utterance in conversation] # filter stopwords, contractions and punctuation
# first and second utterance can't repeat any word of previous utterances of the current speaker
num_repeats_in_utterances[0] = 0
num_repeats_in_utterances[1] = 0
for i in range(2, num_turns):
current = filtered[i] # current utterance
        prev = set.union(*filtered[:i][i % 2::2])  # all previous utterances of the current speaker (same parity; e.g. for i=4 this is utterances 0 and 2)
repeats = current.intersection(prev)
num_repeats_in_utterances[i] = len(repeats)
return num_repeats_in_utterances
def utterance_repetition(conversation): # called 'word_similarity' in original paper
"""Counts the number of distinct words in the current utterance that were also in the previous utterance"""
num_turns = len(conversation)
num_repeats_in_utterances = np.zeros(num_turns)
filtered = [set(utterance).difference(filters) for utterance in
conversation] # filter stopwords, contractions and punctuation
num_repeats_in_utterances[0] = 0 # first utterance can't repeat any word of previous utterance
for i in range(1, num_turns):
current = filtered[i]
prev = filtered[i-1]
repeats = current.intersection(prev)
num_repeats_in_utterances[i] = len(repeats)
return num_repeats_in_utterances
def word_repetition(conversation): # called 'utterance_repetition' in original paper
"""Counts the number of words that occur multiple times within the same utterance (duplicates) """
num_turns = len(conversation)
num_repeats_in_utterances = np.zeros(num_turns)
filtered = [[token for token in utterance if token not in filters] for utterance in conversation] # filter stopwords, contractions and punctuation
for i in range(num_turns):
repeats = len(filtered[i]) - len(set(filtered[i])) # (difference is positive if a word occurs multiple times)
num_repeats_in_utterances[i] = repeats
return num_repeats_in_utterances
def utterance_length(conversation):
"""Counts the length of each utterance."""
filtered = [[token for token in utterance if token not in punct] for utterance in conversation] # filter punctuation
return np.array([len(filtered_utterance) for filtered_utterance in filtered])
# caveats: if the sentiment is negative, it may only be because of the topic, not the person being unhappy with the bot
def deepmoji(conversation):
"""Computes the Deepmoji sentiment of each utterance and
its (sentiment) coherence with the previous utterance in Deepmoji embedding space"""
# Init deepmoji just once
if 'botmoji' not in globals():
print('Loading deepmoji')
from torchMoji.api.botmoji import Botmoji
with torch.no_grad():
global botmoji
botmoji = Botmoji()
# botmoji takes list of utterance strings (not list of lists of tokens)
utterances = [' '.join(tokens) for tokens in conversation]
# Run deepmoji: embed utterances
sentiment_multiplier = _get_sentiment_multiplier()
utterance_emojis = botmoji.encode_multiple(utterances)
sentiments = np.dot(utterance_emojis, sentiment_multiplier)
# compute coherence (cosine similarity) of each utterance with the previous utterance
rolled = np.roll(utterance_emojis, shift=1, axis=0)
emoji_coherence = cosine_similarity(utterance_emojis, rolled)
# for the first utterance the coherence with the previous utterance cannot be computed --> manually set to zero
emoji_coherence[0] = 0.0
return [sentiments, emoji_coherence]
def infersent_coherence(conversation):
"""
Computes the (semantic) coherence of each utterance with the previous utterance in InferSent embedding space
(cosine_similarity).
"""
# Init infersent just once
if 'botsent' not in globals():
print('Loading InferSent')
from inferSent.api.botsent import Botsent
with torch.no_grad():
global botsent
dataset_dir = Path(ROOT_DIR).joinpath('datasets/reddit_casual/train')
botsent = Botsent(dataset_dir, use_pca=False)
utterances = [' '.join(tokens) for tokens in conversation]
# Run botsent: embed utterances
embedded_utterances = botsent.encode_multiple(utterances)
# compute coherence (cosine similarity) of each utterance with the previous utterance
rolled = np.roll(embedded_utterances, shift=1, axis=0)
coherence = cosine_similarity(embedded_utterances, rolled)
# for the first utterance the coherence with the previous utterance cannot be computed --> manually set to zero
coherence[0] = 0.0
return coherence
def USE_similarity(conversation):
"""
Computes the (semantic) coherence of each utterance with the previous utterance in UniversalSentenceEncoder
embedding space (cosine_similarity).
Get model:
1. download model from: https://tfhub.dev/google/universal-sentence-encoder-large/3
2. unzip at configs.project_dir/UniversalSentenceEncoder
"""
if 'universal_encoder' not in globals():
print('Loading Universal Sentence Encoder')
# import tensorflow as tf
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
import tensorflow_hub as hub
global universal_encoder, sess, sents, embed_op
use_path = os.path.join(ROOT_DIR, "UniversalSentenceEncoder/universal-sentence-encoder-large_3")
with tf.device('/cpu:0'):
universal_encoder = hub.Module(use_path)
sents = tf.placeholder(tf.string, shape=None, name="input_sents")
embed_op = universal_encoder(sents)
sess = tf.Session()
sess.run([tf.global_variables_initializer(), tf.tables_initializer()])
utterances = [' '.join(tokens) for tokens in conversation]
# Run UniversalSentenceEncoder: embed utterances
embedded_utterances = sess.run(embed_op, feed_dict={sents: utterances})
# compute coherence (cosine similarity) of each utterance with the previous utterance
rolled = np.roll(embedded_utterances, shift=1, axis=0)
coherence = cosine_similarity(embedded_utterances, rolled)
# for the first utterance the coherence with the previous utterance cannot be computed --> manually set to zero
coherence[0] = 0.0
return coherence
def word2vec_coherence(conversation):
"""
Computes the coherence of each utterance with the previous utterance in word2vec embedding space (cosine_similarity)
Get GoogleNews vectors:
1. download vectors from: https://code.google.com/archive/p/word2vec/
2. unzip at configs.project_dir/word2vec
"""
if 'word2vec' not in globals():
print('Loading word2vec dict')
import gensim
global word2vec, keys
word2vec_path = Path(ROOT_DIR).joinpath("word2vec/GoogleNews-vectors-negative300.bin")
word2vec = gensim.models.KeyedVectors.load_word2vec_format(word2vec_path, binary=True)
keys = word2vec.vocab
num_turns = len(conversation)
coherence = np.zeros(num_turns)
# Embed each word in all utterances of the conversation with word2vec and compute the mean embedding per utterance
# (average over all words in each utterance)
embedded_utterances = []
indices_of_utterances_without_embeddings = []
for idx, utterance in enumerate(conversation):
embedded_utterance = []
for word in utterance: # embed utterance
if word in keys:
embedded_utterance.append(word2vec[word])
if not embedded_utterance: # no word in the utterance could be embedded (no embedding in word2vec)
indices_of_utterances_without_embeddings.append(idx) # save index to set coherence to 0 afterwards
embedded_utterances.append(np.mean([word2vec['placeholder']], axis=0)) # add placeholder, will be set to 0!
else:
embedded_utterances.append(np.mean(embedded_utterance, axis=0))
embedded_utterances = np.array(embedded_utterances)
# compute coherence (cosine similarity) of each utterance with the previous utterance
    rolled = np.roll(embedded_utterances, shift=1, axis=0)
    coherence = cosine_similarity(embedded_utterances, rolled)
    # utterances for which no word could be embedded are not comparable --> manually set to zero
    coherence[indices_of_utterances_without_embeddings] = 0.0
    # for the first utterance the coherence with the previous utterance cannot be computed --> manually set to zero
    coherence[0] = 0.0
    return coherence
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 23 10:53:34 2015
@author: adeshmu2
"""
import numpy as np
import time
from bitconv import bitconvarray
# extract features, create instances and create labels from the test data.
def createInstances(total_device_power, device_timer, device_power,weather_data,classify,timewindow):
#def createInstances(total_device_power, device_timer, device_power, classify, timewindow):
timestep = 3#device_timer[2,1] - device_timer[1,1]
numdata = len(total_device_power)
numdevices = len(device_power[0])
idxstep = int(timewindow/timestep)
numinstances = int(numdata/idxstep)
binarylabels = np.zeros(shape=(numinstances,numdevices),dtype=np.int)
# create 5 minute snippets from the given data.
stridx = 0
endidx = idxstep - 1
snippets = np.zeros(shape=(numinstances,idxstep))
snippets_timer = np.zeros(shape=(numinstances,idxstep))
snippets_tstamp = np.zeros(shape=(numinstances))
snippets_temp = np.zeros(shape=(numinstances))
snippets_devices = np.zeros(shape=(idxstep,numdevices))
labels = np.zeros(shape=(numinstances))
print('instances: {}'.format(numinstances))
for instance in range(0,numinstances):
snippets[instance,:] = total_device_power[stridx:endidx+1]
snippets_timer[instance,:] = device_timer[stridx:endidx+1,0]
snippets_tstamp[instance] = device_timer[endidx+1,0]
snippets_temp[instance] = weather_data[endidx+1]
for device in range(0,numdevices):
snippets_devices[:,device] = device_power[stridx:endidx+1,device]
# get the correct labels
labels[instance],binarylabels[instance,:] = extractLabel(snippets_devices,numdevices)
stridx = endidx + 1
endidx = endidx + idxstep
if classify == 1:
instances = extractFeaturesBayes(snippets,snippets_tstamp,snippets_temp,numinstances,weather_data,snippets_timer)
#instances = extractFeaturesBayes(snippets, snippets_tstamp, numinstances, snippets_timer)
else:
#elif classify == 2 or classify == 3 or classify == 4 or classify == 5 or classify == 6:
instances = extractFeaturesRegression(snippets,snippets_tstamp,snippets_temp,numinstances,weather_data,snippets_timer)
#instances = extractFeaturesRegression(snippets,snippets_tstamp,numinstances,snippets_timer)
return instances, labels, binarylabels, snippets
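
# Usage sketch (the array shapes are inferred from the indexing above and are an
# assumption rather than a documented contract):
#
#     total_device_power : (numdata,)            aggregate power trace
#     device_timer       : (numdata, >=1)        UNIX timestamps in column 0
#     device_power       : (numdata, numdevices) per-device power readings
#     weather_data       : (numdata,)            temperature, or zeros if unavailable
#
#     X, y, y_bin, snippets = createInstances(total_device_power, device_timer,
#                                             device_power, weather_data,
#                                             classify=1, timewindow=300)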
def extractFeaturesBayes(snippets,snippets_tstamp,snippets_temp,numinstances,weather_data,snippets_timer):
#def extractFeaturesBayes(snippets, snippets_tstamp, numinstances, snippets_timer):
instances = np.zeros(shape=(numinstances,7))
available_weather_data = True if np.sum(weather_data) > 0 else False
for instance in range(0,numinstances):
# feature 0 - average power instance
avg_instance = np.average(snippets[instance,:])
instances[instance,0] = avgRanking(avg_instance)
# feature 1 - std deviation of power
std_instance = np.std(snippets[instance,:])
instances[instance,1] = stdRanking(std_instance)
# feature 2 - local hour of the day fraction
local_time_sec = time.localtime(snippets_tstamp[instance])
time_hour = float(local_time_sec.tm_hour) + float(local_time_sec.tm_min)/60
instances[instance,2] = todRanking(time_hour)
# feature 3 - local average temperature during the 5 minute window
if available_weather_data > 0:
instances[instance,3] = weatherRanking(snippets_temp[instance])
else:
instances[instance, 3] = 0
# feature 4 - Maximum power reading
instances[instance, 4] = maxPowerRanking(max(snippets[instance,:]))
# feature 5 - The energy (integral of power) reading
instances[instance, 5] = energyRanking(np.trapz(snippets[instance,:],snippets_timer[instance,:]))
# feature 6 - Day of the week
instances[instance,6] = local_time_sec.tm_wday
return instances
def extractFeaturesRegression(snippets, snippets_tstamp, snippets_temp, numinstances, weather_data, snippets_timer):
#def extractFeaturesRegression(snippets, snippets_tstamp, numinstances, snippets_timer):
instances = np.zeros(shape=(numinstances, 7))
available_weather_data = True if np.sum(weather_data) > 0 else False
for instance in range(0,numinstances):
# feature 0 - average power instance
avg_instance = np.average(snippets[instance,:])
instances[instance,0] = avg_instance
# feature 1 - std deviation of power
std_instance = np.std(snippets[instance,:])
instances[instance,1] = std_instance
# feature 2 - local hour of the day fraction
local_time_sec = time.localtime(snippets_tstamp[instance])
time_hour = float(local_time_sec.tm_hour) + float(local_time_sec.tm_min)/60
instances[instance,2] = todRanking(time_hour)
# feature 3 - local average temperature during the 5 minute window
if available_weather_data:
instances[instance,3] = snippets_temp[instance]
else:
instances[instance,3] = 0
# feature 4 - Maximum power reading
instances[instance, 4] = max(snippets[instance, :])
# feature 5 - The energy (integral of power) reading
        instances[instance, 5] = np.trapz(snippets[instance, :], snippets_timer[instance, :])
        # feature 6 - Day of the week
        instances[instance, 6] = local_time_sec.tm_wday
    return instances
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 5 18:56:30 2018
@author: <NAME>
"""
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import numpy as np
from sklearn.decomposition import PCA
from random import seed
from random import random
from random import gauss
import random
import copy
from scipy import stats
def data_rand(N,M,sigma,Groups=2):
# create the data container
data_rand = []
Labels = []
# seed random number generator
# generate random numbers between 0-1
for _ in range(M):
mean_random = random.randint(50,150)#create one mean value
v = []#create the sample points for each variable
for k in range(N):
v.append(gauss(mean_random, random.randint(sigma,2*sigma)))
data_rand.append(v)
for _ in range(N):
Labels.append(random.randint(0,Groups-1))
return data_rand,Labels
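
# Usage sketch (parameter values are illustrative): draw 5 Gaussian variables for
# 100 samples with binary labels. Note that data_rand returns the variables as
# rows (a list of M lists of length N), so transpose before sample-by-feature
# routines such as PCA:
#
#     X, y = data_rand(N=100, M=5, sigma=10, Groups=2)
#     X = np.asarray(X).T   # shape (N, M)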
def add_signifficance(data,Labels,Groups,averageSig,sigma,sigvars):
sig = []
for j in Groups:
if j>0:
for v in sigvars:
k = random.randint(averageSig-2*sigma,averageSig+2*sigma) + gauss(0, random.randint(sigma,2*sigma))
sig.append(k)
data[Labels==j,v] = data[Labels==j,v] + k
return data,sig
def JSDe(X,Y,w,k):
#project the data to k
N,M = np.shape(X)
T = np.repeat([k],N,0)
xp = np.sort(np.sum(X*T,1)/np.linalg.norm(k)**2)
j = 0
JSDe = 0
    C = np.unique(Y)
def transform_scalars(dataset):
"""Delete Slices in Dataset"""
from tomviz import utils
import numpy as np
axis = 0;
#----USER SPECIFIED VARIABLES-----#
###firstSlice###
###lastSlice###
###axis### #Axis along which to delete the subarray
#---------------------------------#
# Get the current dataset.
array = utils.get_array(dataset)
# Get indices of the slices to be deleted.
indices = np.linspace(firstSlice,lastSlice,lastSlice-firstSlice+1).astype(int)
# Delete the specified slices.
    array = np.delete(array, indices, axis)

    # Write the reduced array back onto the dataset (assumes the set_array helper
    # that accompanies utils.get_array in this tomviz operator API).
    utils.set_array(dataset, array)
import numpy as np
def getMedCurve(xar, yar, loose=True, threshold=3, error=False):
"""Takes repeated nummerical data (replicates passed as lists, stored in a list)
and computes the average and error. Useful for displaying "average" plots
with error bands.
This function was taken from the following github repo (https://github.com/CellMechLab/nanoindentation),
author Dr <NAME> at the Cellular Mechanobiology Lab, University of Glasgow."""
if loose is False:
xmin = -np.inf
xmax = np.inf
deltax = 0
nonecount = 0
for x in xar:
if x is not None and np.min(x) is not None:
xmin = np.max([xmin, np.min(x)])
xmax = np.min([xmax, np.max(x)])
deltax += ((np.max(x)-np.min(x))/(len(x)-1))
else:
nonecount += 1
deltax /= (len(xar)-nonecount)
xnew = np.linspace(xmin, xmax, int((xmax-xmin)/(deltax)))
ynew = np.zeros(len(xnew))
for i in range(len(xar)):
if xar[i] is not None and np.min(xar[i]) is not None:
ycur = np.interp(xnew, xar[i], yar[i])
ynew += ycur
ynew /= (len(xar)-nonecount)
else:
xmin = np.inf
xmax = -np.inf
deltax = 0
for x in xar:
try:
xmin = np.min([xmin, np.min(x)])
                xmax = np.max([xmax, np.max(x)])
"""
Monitoring algorithms for Quicklook pipeline
"""
import numpy as np
import scipy.ndimage
import yaml
from lvmspec.quicklook.qas import MonitoringAlg, QASeverity
from lvmspec.quicklook import qlexceptions
from lvmspec.quicklook import qllogger
import os,sys
import datetime
from astropy.time import Time
from lvmspec.qa import qalib
from lvmspec.io import qa
qlog=qllogger.QLLogger("QuickLook",0)
log=qlog.getlog()
def qlf_post(qadict):
"""
A general function to HTTP post the QA output dictionary, intended for QLF
requires environmental variables: QLF_API_URL, QLF_USER, QLF_PASSWD
Args:
qadict: returned dictionary from a QA
"""
#- Check for environment variables and set them here
if "QLF_API_URL" in os.environ:
qlf_url=os.environ.get("QLF_API_URL")
if "QLF_USER" not in os.environ or "QLF_PASSWD" not in os.environ:
log.warning("Environment variables are not set for QLF. Set QLF_USER and QLF_PASSWD.")
else:
qlf_user=os.environ.get("QLF_USER")
qlf_passwd=os.environ.get("QLF_PASSWD")
log.debug("Environment variables are set for QLF. Now trying HTTP post.")
#- All set. Now try to HTTP post
try:
import requests
response=requests.get(qlf_url)
#- Check if the api has json
api=response.json()
#- proceed with post
job={"name":"QL","status":0,"dictionary":qadict} #- QLF should disintegrate dictionary
response=requests.post(api['job'],json=job,auth=(qlf_user,qlf_passwd))
            except Exception:
                log.error("Skipping HTTP post... Exception", exc_info=True)
else:
log.warning("Skipping QLF. QLF_API_URL must be set as environment variable")
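
# Usage sketch (hedged: URL and credentials are placeholders): qlf_post() only
# posts if the three environment variables documented above are set, e.g.
#
#     os.environ['QLF_API_URL'] = 'https://qlf.example.org/api'
#     os.environ['QLF_USER'] = 'user'
#     os.environ['QLF_PASSWD'] = 'secret'
#
# Each QA below then calls qlf_post(retval) from run_qa() when qlf=True.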
class Get_RMS(MonitoringAlg):
def __init__(self,name,config,logger=None):
if name is None or name.strip() == "":
name="RMS"
from lvmspec.image import Image as im
kwargs=config['kwargs']
parms=kwargs['param']
key=kwargs['refKey'] if 'refKey' in kwargs else "NOISE_AMP"
status=kwargs['statKey'] if 'statKey' in kwargs else "NOISE_STAT"
kwargs["SAMI_RESULTKEY"]=key
kwargs["SAMI_QASTATUSKEY"]=status
if "ReferenceMetrics" in kwargs:
r=kwargs["ReferenceMetrics"]
if key in r:
kwargs["REFERENCE"]=r[key]
if "NOISE_WARN_RANGE" in parms and "NOISE_NORMAL_RANGE" in parms:
kwargs["RANGES"]=[(np.asarray(parms["NOISE_WARN_RANGE"]),QASeverity.WARNING),
(np.asarray(parms["NOISE_NORMAL_RANGE"]),QASeverity.NORMAL)]# sorted by most severe to least severe
MonitoringAlg.__init__(self,name,im,config,logger)
def run(self,*args,**kwargs):
if len(args) == 0 :
raise qlexceptions.ParameterException("Missing input parameter")
if not self.is_compatible(type(args[0])):
raise qlexceptions.ParameterException("Incompatible parameter type. Was expecting lvmspec.image.Image got {}".format(type(args[0])))
input_image=args[0]
if "paname" not in kwargs:
paname=None
else:
paname=kwargs["paname"]
if "ReferenceMetrics" in kwargs: refmetrics=kwargs["ReferenceMetrics"]
else: refmetrics=None
amps=False
if "amps" in kwargs:
amps=kwargs["amps"]
if "param" in kwargs: param=kwargs["param"]
else: param=None
if "qlf" in kwargs:
qlf=kwargs["qlf"]
else: qlf=False
if "qafile" in kwargs: qafile = kwargs["qafile"]
else: qafile = None
if "qafig" in kwargs: qafig=kwargs["qafig"]
else: qafig = None
return self.run_qa(input_image,paname=paname,amps=amps,qafile=qafile,qafig=qafig, param=param, qlf=qlf, refmetrics=refmetrics)
def run_qa(self,image,paname=None,amps=False,qafile=None, qafig=None,param=None,qlf=False, refmetrics=None):
retval={}
retval["EXPID"] = '{0:08d}'.format(image.meta["EXPID"])
retval["PANAME"] = paname
retval["QATIME"] = datetime.datetime.now().isoformat()
retval["CAMERA"] = image.meta["CAMERA"]
retval["PROGRAM"] = image.meta["PROGRAM"]
retval["FLAVOR"] = image.meta["FLAVOR"]
retval["NIGHT"] = image.meta["NIGHT"]
kwargs=self.config['kwargs']
# return rms values in rms/sqrt(exptime)
rmsccd=qalib.getrms(image.pix/np.sqrt(image.meta["EXPTIME"])) #- should we add dark current and/or readnoise to this as well?
if param is None:
log.debug("Param is None. Using default param instead")
param = {
"NOISE_NORMAL_RANGE":[-1.0, 1.0],
"NOISE_WARN_RANGE":[-2.0, 2.0]
}
retval["PARAMS"] = param
if "REFERENCE" in kwargs:
retval['PARAMS']['NOISE_AMP_REF']=kwargs["REFERENCE"]
expnum=[]
rms_row=[]
rms_amps=[]
rms_over_amps=[]
overscan_values=[]
#- get amp/overcan boundary in pixels
from lvmspec.preproc import _parse_sec_keyword
for kk in ['1','2','3','4']:
thisampboundary=_parse_sec_keyword(image.meta["CCDSEC"+kk])
thisoverscanboundary=_parse_sec_keyword(image.meta["BIASSEC"+kk])
for i in range(image.pix[thisoverscanboundary].shape[0]):
rmsrow = qalib.getrms(image.pix[thisoverscanboundary][i]/np.sqrt(image.meta["EXPTIME"]))
rms_row.append(rmsrow)
rms_thisover_thisamp=qalib.getrms(image.pix[thisoverscanboundary]/np.sqrt(image.meta["EXPTIME"]))
rms_thisamp=qalib.getrms(image.pix[thisampboundary]/np.sqrt(image.meta["EXPTIME"]))
rms_amps.append(rms_thisamp)
rms_over_amps.append(rms_thisover_thisamp)
rmsover=np.max(rms_over_amps)
rmsdiff_err='NORMAL'
if amps:
rms_amps=[]
rms_over_amps=[]
overscan_values=[]
#- get amp/overcan boundary in pixels
from lvmspec.preproc import _parse_sec_keyword
for kk in ['1','2','3','4']:
thisampboundary=_parse_sec_keyword(image.meta["CCDSEC"+kk])
thisoverscanboundary=_parse_sec_keyword(image.meta["BIASSEC"+kk])
rms_thisover_thisamp=qalib.getrms(image.pix[thisoverscanboundary]/np.sqrt(image.meta["EXPTIME"]))
thisoverscan_values=np.ravel(image.pix[thisoverscanboundary]/np.sqrt(image.meta["EXPTIME"]))
rms_thisamp=qalib.getrms(image.pix[thisampboundary]/np.sqrt(image.meta["EXPTIME"]))
rms_amps.append(rms_thisamp)
rms_over_amps.append(rms_thisover_thisamp)
overscan_values+=thisoverscan_values.tolist()
rmsover=np.std(overscan_values)
# retval["METRICS"]={"RMS":rmsccd,"NOISE":rmsover,"RMS_AMP":np.array(rms_amps),"NOISE_AMP":np.array(rms_over_amps),"RMS_ROW":rms_row,"EXPNUM_WARN":expnum}
retval["METRICS"]={"RMS":rmsccd,"NOISE":rmsover,"RMS_AMP":np.array(rms_amps),"NOISE_AMP":np.array(rms_over_amps),"RMS_ROW":rms_row,"NOISE_STAT":rmsdiff_err,"EXPNUM_WARN":expnum}
else:
# retval["METRICS"]={"RMS":rmsccd,"NOISE":rmsover,"RMS_ROW":rms_row,"EXPNUM_WARN":expnum}
retval["METRICS"]={"RMS":rmsccd,"NOISE":rmsover,"RMS_ROW":rms_row,"NOISE_STAT":rmsdiff_err,"EXPNUM_WARN":expnum}
if qlf:
qlf_post(retval)
if qafile is not None:
outfile = qa.write_qa_ql(qafile,retval)
log.debug("Output QA data is in {}".format(outfile))
if qafig is not None:
from lvmspec.qa.qa_plots_ql import plot_RMS
plot_RMS(retval,qafig)
log.debug("Output QA fig {}".format(qafig))
return retval
def get_default_config(self):
return {}
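
# Configuration sketch for Get_RMS (hedged: only the keys read by __init__ and run
# above are shown; in the full pipeline this dict is normally assembled from the
# QuickLook YAML configuration):
#
#     config = {'kwargs': {'param': {'NOISE_NORMAL_RANGE': [-1.0, 1.0],
#                                    'NOISE_WARN_RANGE': [-2.0, 2.0]}}}
#     qa_rms = Get_RMS('RMS', config)
#     result = qa_rms.run(image, paname='PREPROC', amps=True)  # image: lvmspec.image.Image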
class Count_Pixels(MonitoringAlg):
def __init__(self,name,config,logger=None):
if name is None or name.strip() == "":
name="COUNTPIX"
from lvmspec.image import Image as im
kwargs=config['kwargs']
parms=kwargs['param']
key=kwargs['refKey'] if 'refKey' in kwargs else "NPIX_AMP"
status=kwargs['statKey'] if 'statKey' in kwargs else "NPIX_STAT"
kwargs["SAMI_RESULTKEY"]=key
kwargs["SAMI_QASTATUSKEY"]=status
if "ReferenceMetrics" in kwargs:
r=kwargs["ReferenceMetrics"]
if key in r:
kwargs["REFERENCE"]=r[key]
if "NPIX_WARN_RANGE" in parms and "NPIX_NORMAL_RANGE" in parms:
kwargs["RANGES"]=[(np.asarray(parms["NPIX_WARN_RANGE"]),QASeverity.WARNING),
(np.asarray(parms["NPIX_NORMAL_RANGE"]),QASeverity.NORMAL)]# sorted by most severe to least severe
MonitoringAlg.__init__(self,name,im,config,logger)
def run(self,*args,**kwargs):
if len(args) == 0 :
raise qlexceptions.ParameterException("Missing input parameter")
if not self.is_compatible(type(args[0])):
raise qlexceptions.ParameterException("Incompatible input. Was expecting {} got {}".format(type(self.__inpType__),type(args[0])))
input_image=args[0]
if "paname" not in kwargs:
paname=None
else:
paname=kwargs["paname"]
if "ReferenceMetrics" in kwargs: refmetrics=kwargs["ReferenceMetrics"]
else: refmetrics=None
amps=False
if "amps" in kwargs:
amps=kwargs["amps"]
if "param" in kwargs: param=kwargs["param"]
else: param=None
if "qlf" in kwargs:
qlf=kwargs["qlf"]
else: qlf=False
if "qafile" in kwargs: qafile = kwargs["qafile"]
else: qafile = None
if "qafig" in kwargs: qafig=kwargs["qafig"]
else: qafig = None
return self.run_qa(input_image,paname=paname,amps=amps,qafile=qafile,qafig=qafig, param=param, qlf=qlf, refmetrics=refmetrics)
def run_qa(self,image,paname=None,amps=False,qafile=None,qafig=None, param=None, qlf=False, refmetrics=None):
retval={}
retval["PANAME"] = paname
retval["QATIME"] = datetime.datetime.now().isoformat()
retval["EXPID"] = '{0:08d}'.format(image.meta["EXPID"])
retval["CAMERA"] = image.meta["CAMERA"]
retval["PROGRAM"] = image.meta["PROGRAM"]
retval["FLAVOR"] = image.meta["FLAVOR"]
retval["NIGHT"] = image.meta["NIGHT"]
kwargs=self.config['kwargs']
if param is None:
log.debug("Param is None. Using default param instead")
param = {
"CUTLO":3, # low threshold for number of counts in sigmas
"CUTHI":10,
"NPIX_NORMAL_RANGE":[200.0, 500.0],
"NPIX_WARN_RANGE":[50.0, 650.0]
}
retval["PARAMS"] = param
if "REFERENCE" in kwargs:
retval['PARAMS']['NPIX_AMP_REF']=kwargs["REFERENCE"]
#- get the counts over entire CCD in counts per second
npixlo=qalib.countpix(image.pix,nsig=param['CUTLO']) #- above 3 sigma in counts
npixhi=qalib.countpix(image.pix,nsig=param['CUTHI']) #- above 10 sigma in counts
npix_err='NORMAL'
#- get the counts for each amp
if amps:
npixlo_amps=[]
npixhi_amps=[]
#- get amp boundary in pixels
from lvmspec.preproc import _parse_sec_keyword
for kk in ['1','2','3','4']:
ampboundary=_parse_sec_keyword(image.meta["CCDSEC"+kk])
npixlo_thisamp=qalib.countpix(image.pix[ampboundary]/image.meta["EXPTIME"],nsig=param['CUTLO'])
npixlo_amps.append(npixlo_thisamp)
npixhi_thisamp=qalib.countpix(image.pix[ampboundary]/image.meta["EXPTIME"],nsig=param['CUTHI'])
npixhi_amps.append(npixhi_thisamp)
# retval["METRICS"]={"NPIX_LOW":npixlo,"NPIX_HIGH":npixhi,"NPIX_AMP": npixlo_amps,"NPIX_HIGH_AMP": npixhi_amps}
retval["METRICS"]={"NPIX_LOW":npixlo,"NPIX_HIGH":npixhi,"NPIX_AMP": npixlo_amps,"NPIX_HIGH_AMP": npixhi_amps,"NPIX_STAT":npix_err}
else:
# retval["METRICS"]={"NPIX_LOW":npixlo,"NPIX_HIGH":npixhi}
retval["METRICS"]={"NPIX_LOW":npixlo,"NPIX_HIGH":npixhi,"NPIX_STAT":npix_err}
if qlf:
qlf_post(retval)
if qafile is not None:
outfile = qa.write_qa_ql(qafile,retval)
log.debug("Output QA data is in {}".format(outfile))
if qafig is not None:
from lvmspec.qa.qa_plots_ql import plot_countpix
plot_countpix(retval,qafig)
log.debug("Output QA fig {}".format(qafig))
return retval
def get_default_config(self):
return {}
class Integrate_Spec(MonitoringAlg):
def __init__(self,name,config,logger=None):
if name is None or name.strip() == "":
name="INTEG"
from lvmspec.frame import Frame as fr
kwargs=config['kwargs']
parms=kwargs['param']
key=kwargs['refKey'] if 'refKey' in kwargs else "INTEG_AVG"
status=kwargs['statKey'] if 'statKey' in kwargs else "MAGDIFF_STAT"
kwargs["SAMI_RESULTKEY"]=key
kwargs["SAMI_QASTATUSKEY"]=status
if "ReferenceMetrics" in kwargs:
r=kwargs["ReferenceMetrics"]
if key in r:
kwargs["REFERENCE"]=r[key]
if "MAGDIFF_WARN_RANGE" in parms and "MAGDIFF_NORMAL_RANGE" in parms:
kwargs["RANGES"]=[(np.asarray(parms["MAGDIFF_WARN_RANGE"]),QASeverity.WARNING),
(np.asarray(parms["MAGDIFF_NORMAL_RANGE"]),QASeverity.NORMAL)]# sorted by most severe to least severe
MonitoringAlg.__init__(self,name,fr,config,logger)
def run(self,*args,**kwargs):
if len(args) == 0 :
raise qlexceptions.ParameterException("Missing input parameter")
if not self.is_compatible(type(args[0])):
raise qlexceptions.ParameterException("Incompatible input. Was expecting {}, got {}".format(type(self.__inpType__),type(args[0])))
fibermap=kwargs['FiberMap']
input_frame=args[0]
if "paname" not in kwargs:
paname=None
else:
paname=kwargs["paname"]
if "ReferenceMetrics" in kwargs: refmetrics=kwargs["ReferenceMetrics"]
else: refmetrics=None
amps=False
if "amps" in kwargs:
amps=kwargs["amps"]
if "param" in kwargs: param=kwargs["param"]
else: param=None
dict_countbins=None
if "dict_countbins" in kwargs:
dict_countbins=kwargs["dict_countbins"]
if "qlf" in kwargs:
qlf=kwargs["qlf"]
else: qlf=False
if "qafile" in kwargs: qafile = kwargs["qafile"]
else: qafile = None
if "qafig" in kwargs: qafig=kwargs["qafig"]
else: qafig = None
return self.run_qa(fibermap,input_frame,paname=paname,amps=amps, dict_countbins=dict_countbins, qafile=qafile,qafig=qafig, param=param, qlf=qlf, refmetrics=refmetrics)
def run_qa(self,fibermap,frame,paname=None,amps=False,dict_countbins=None, qafile=None,qafig=None, param=None, qlf=False, refmetrics=None):
retval={}
retval["PANAME" ] = paname
retval["QATIME"] = datetime.datetime.now().isoformat()
retval["EXPID"] = '{0:08d}'.format(frame.meta["EXPID"])
retval["CAMERA"] = frame.meta["CAMERA"]
retval["PROGRAM"] = frame.meta["PROGRAM"]
retval["FLAVOR"] = frame.meta["FLAVOR"]
retval["NIGHT"] = frame.meta["NIGHT"]
kwargs=self.config['kwargs']
ra = fibermap["RA_TARGET"]
dec = fibermap["DEC_TARGET"]
#- get the integrals for all fibers
flux=frame.flux
wave=frame.wave
integrals=np.zeros(flux.shape[0])
for ii in range(len(integrals)):
integrals[ii]=qalib.integrate_spec(wave,flux[ii])
#- average integrals over fibers of each object type and get imaging magnitudes
integ_avg_tgt=[]
mag_avg_tgt=[]
for T in ["ELG","QSO","LRG","STD"]:
fibers=np.where(frame.fibermap['OBJTYPE']==T)[0]
if len(fibers) < 1:
log.warning("no {} fibers found.".format(T))
magnitudes=frame.fibermap['MAG'][fibers]
mag_avg=np.mean(magnitudes)
mag_avg_tgt.append(mag_avg)
integ=integrals[fibers]
integ_avg=np.mean(integ)
integ_avg_tgt.append(integ_avg)
if T == "STD":
starfibers=fibers
int_stars=integ
int_average=integ_avg
# simple, temporary magdiff calculation (to be corrected...)
magdiff_avg=[]
for i in range(len(mag_avg_tgt)):
mag_fib=-2.5*np.log(integ_avg_tgt[i]/frame.meta["EXPTIME"])+30.
            if not np.isnan(mag_avg_tgt[i]):
                magdiff = mag_fib - mag_avg_tgt[i]
            else:
                magdiff = np.nan
magdiff_avg.append(magdiff)
if param is None:
log.debug("Param is None. Using default param instead")
param = {
"MAGDIFF_NORMAL_RANGE":[-0.5, 0.5],
"MAGDIFF_WARN_RANGE":[-1.0, 1.0]
}
retval["PARAMS"] = param
if "REFERENCE" in kwargs:
retval['PARAMS']['MAGDIFF_TGT_REF']=kwargs["REFERENCE"]
magdiff_avg_amp = [0.0]
magdiff_err='NORMAL'
#- get the counts for each amp
if amps:
#- get the fiducial boundary
leftmax = dict_countbins["LEFT_MAX_FIBER"]
rightmin = dict_countbins["RIGHT_MIN_FIBER"]
bottommax = dict_countbins["BOTTOM_MAX_WAVE_INDEX"]
topmin = dict_countbins["TOP_MIN_WAVE_INDEX"]
fidboundary = qalib.slice_fidboundary(frame,leftmax,rightmin,bottommax,topmin)
int_avg_amps=np.zeros(4)
for amp in range(4):
wave=frame.wave[fidboundary[amp][1]]
select_thisamp=starfibers[(starfibers >= fidboundary[amp][0].start) & (starfibers < fidboundary[amp][0].stop)]
stdflux_thisamp=frame.flux[select_thisamp,fidboundary[amp][1]]
if len(stdflux_thisamp)==0:
continue
else:
integ_thisamp=np.zeros(stdflux_thisamp.shape[0])
for ii in range(stdflux_thisamp.shape[0]):
integ_thisamp[ii]=qalib.integrate_spec(wave,stdflux_thisamp[ii])
int_avg_amps[amp]=np.mean(integ_thisamp)
# retval["METRICS"]={"RA":ra,"DEC":dec, "INTEG":int_stars, "INTEG_AVG":int_average,"INTEG_AVG_AMP":int_avg_amps, "STD_FIBERID": starfibers.tolist(),"MAGDIFF_TGT":magdiff_avg,"MAGDIFF_AVG_AMP":magdiff_avg_amp}
retval["METRICS"]={"RA":ra,"DEC":dec, "INTEG":int_stars, "INTEG_AVG":int_average,"INTEG_AVG_AMP":int_avg_amps, "STD_FIBERID": starfibers.tolist(),"MAGDIFF_TGT":magdiff_avg,"MAGDIFF_AVG_AMP":magdiff_avg_amp,"MAGDIFF_STAT":magdiff_err}
else:
# retval["METRICS"]={"RA":ra,"DEC":dec, "INTEG":int_stars,"INTEG_AVG":int_average,"STD_FIBERID":starfibers.tolist(),"MAGDIFF_TGT":magdiff_avg}
retval["METRICS"]={"RA":ra,"DEC":dec, "INTEG":int_stars,"INTEG_AVG":int_average,"STD_FIBERID":starfibers.tolist(),"MAGDIFF_TGT":magdiff_avg,"MAGDIFF_STAT":magdiff_err}
if qlf:
qlf_post(retval)
if qafile is not None:
outfile = qa.write_qa_ql(qafile,retval)
log.debug("Output QA data is in {}".format(outfile))
if qafig is not None:
from lvmspec.qa.qa_plots_ql import plot_integral
plot_integral(retval,qafig)
log.debug("Output QA fig {}".format(qafig))
return retval
def get_default_config(self):
return {}
class Sky_Continuum(MonitoringAlg):
def __init__(self,name,config,logger=None):
if name is None or name.strip() == "":
name="SKYCONT"
from lvmspec.frame import Frame as fr
kwargs=config['kwargs']
parms=kwargs['param']
key=kwargs['refKey'] if 'refKey' in kwargs else "SKYCONT"
status=kwargs['statKey'] if 'statKey' in kwargs else "SKYCONT_STAT"
kwargs["SAMI_RESULTKEY"]=key
kwargs["SAMI_QASTATUSKEY"]=status
if "ReferenceMetrics" in kwargs:
r=kwargs["ReferenceMetrics"]
if key in r:
kwargs["REFERENCE"]=r[key]
if "SKYCONT_WARN_RANGE" in parms and "SKYCONT_NORMAL_RANGE" in parms:
kwargs["RANGES"]=[(np.asarray(parms["SKYCONT_WARN_RANGE"]),QASeverity.WARNING),
(np.asarray(parms["SKYCONT_NORMAL_RANGE"]),QASeverity.NORMAL)]# sorted by most severe to least severe
MonitoringAlg.__init__(self,name,fr,config,logger)
def run(self,*args,**kwargs):
if len(args) == 0 :
raise qlexceptions.ParameterException("Missing input parameter")
if not self.is_compatible(type(args[0])):
raise qlexceptions.ParameterException("Incompatible input. Was expecting {}, got {}".format(type(self.__inpType__),type(args[0])))
fibermap=kwargs['FiberMap']
input_frame=args[0]
camera=input_frame.meta["CAMERA"]
wrange1=None
wrange2=None
if "wrange1" in kwargs:
wrange1=kwargs["wrange1"]
if "wrange2" in kwargs:
wrange2=kwargs["wrange2"]
if wrange1 is None:
if camera[0]=="b": wrange1= "4000,4500"
if camera[0]=="r": wrange1= "5950,6200"
if camera[0]=="z": wrange1= "8120,8270"
if wrange2 is None:
if camera[0]=="b": wrange2= "5250,5550"
if camera[0]=="r": wrange2= "6990,7230"
if camera[0]=="z": wrange2= "9110,9280"
paname=None
if "paname" in kwargs:
paname=kwargs["paname"]
if "ReferenceMetrics" in kwargs: refmetrics=kwargs["ReferenceMetrics"]
else: refmetrics=None
amps=False
if "amps" in kwargs:
amps=kwargs["amps"]
if "param" in kwargs: param=kwargs["param"]
else: param=None
dict_countbins=None
if "dict_countbins" in kwargs:
dict_countbins=kwargs["dict_countbins"]
if "qlf" in kwargs:
qlf=kwargs["qlf"]
else: qlf=False
if "qafile" in kwargs: qafile = kwargs["qafile"]
else: qafile = None
if "qafig" in kwargs: qafig=kwargs["qafig"]
else: qafig=None
return self.run_qa(fibermap,input_frame,wrange1=wrange1,wrange2=wrange2,paname=paname,amps=amps, dict_countbins=dict_countbins,qafile=qafile,qafig=qafig, param=param, qlf=qlf, refmetrics=refmetrics)
def run_qa(self,fibermap,frame,wrange1=None,wrange2=None,
paname=None,amps=False,dict_countbins=None,
qafile=None,qafig=None, param=None, qlf=False,
refmetrics=None):
#- qa dictionary
retval={}
retval["PANAME" ]= paname
retval["QATIME"] = datetime.datetime.now().isoformat()
retval["EXPID"] = '{0:08d}'.format(frame.meta["EXPID"])
retval["CAMERA"] = frame.meta["CAMERA"]
retval["PROGRAM"] = frame.meta["PROGRAM"]
retval["FLAVOR"] = frame.meta["FLAVOR"]
retval["NIGHT"] = frame.meta["NIGHT"]
kwargs=self.config['kwargs']
ra = fibermap["RA_TARGET"]
dec = fibermap["DEC_TARGET"]
if param is None:
log.debug("Param is None. Using default param instead")
from lvmspec.io import read_params
desi_params = read_params()
param = {}
for key in ['B_CONT','R_CONT', 'Z_CONT', 'SKYCONT_WARN_RANGE', 'SKYCONT_ALARM_RANGE']:
param[key] = desi_params['qa']['skysub']['PARAMS'][key]
retval["PARAMS"] = param
if "REFERENCE" in kwargs:
retval['PARAMS']['SKYCONT_REF']=kwargs["REFERENCE"]
skyfiber, contfiberlow, contfiberhigh, meancontfiber, skycont = qalib.sky_continuum(
frame, wrange1, wrange2)
skycont_err = 'NORMAL'
if amps:
leftmax = dict_countbins["LEFT_MAX_FIBER"]
rightmin = dict_countbins["RIGHT_MIN_FIBER"]
bottommax = dict_countbins["BOTTOM_MAX_WAVE_INDEX"]
topmin = dict_countbins["TOP_MIN_WAVE_INDEX"]
fidboundary = qalib.slice_fidboundary(frame,leftmax,rightmin,bottommax,topmin)
k1=np.where(skyfiber < fidboundary[0][0].stop)[0]
maxsky_index=max(k1)
contamp1=np.mean(contfiberlow[:maxsky_index])
contamp3=np.mean(contfiberhigh[:maxsky_index])
if fidboundary[1][0].start >=fidboundary[0][0].stop:
k2=np.where(skyfiber > fidboundary[1][0].start)[0]
minsky_index=min(k2)
contamp2=np.mean(contfiberlow[minsky_index:])
contamp4=np.mean(contfiberhigh[minsky_index:])
else:
contamp2=0
contamp4=0
skycont_amps=np.array((contamp1,contamp2,contamp3,contamp4)) #- in four amps regions
# retval["METRICS"]={"RA":ra,"DEC":dec, "SKYFIBERID": skyfiber.tolist(), "SKYCONT":skycont, "SKYCONT_FIBER":meancontfiber, "SKYCONT_AMP":skycont_amps}
retval["METRICS"]={"RA":ra,"DEC":dec, "SKYFIBERID": skyfiber.tolist(), "SKYCONT":skycont, "SKYCONT_FIBER":meancontfiber, "SKYCONT_AMP":skycont_amps, "SKYCONT_STAT":skycont_err}
else:
# retval["METRICS"]={"RA":ra,"DEC":dec, "SKYFIBERID": skyfiber.tolist(), "SKYCONT":skycont, "SKYCONT_FIBER":meancontfiber}
retval["METRICS"]={"RA":ra,"DEC":dec, "SKYFIBERID": skyfiber.tolist(), "SKYCONT":skycont, "SKYCONT_FIBER":meancontfiber, "SKYCONT_STAT":skycont_err}
if qlf:
qlf_post(retval)
if qafile is not None:
outfile = qa.write_qa_ql(qafile,retval)
log.debug("Output QA data is in {}".format(outfile))
if qafig is not None:
from lvmspec.qa.qa_plots_ql import plot_sky_continuum
plot_sky_continuum(retval,qafig)
log.debug("Output QA fig {}".format(qafig))
return retval
def get_default_config(self):
return {}
class Sky_Peaks(MonitoringAlg):
def __init__(self,name,config,logger=None):
if name is None or name.strip() == "":
name="SKYPEAK"
from lvmspec.frame import Frame as fr
kwargs=config['kwargs']
parms=kwargs['param']
key=kwargs['refKey'] if 'refKey' in kwargs else "PEAKCOUNT_MED_SKY"
status=kwargs['statKey'] if 'statKey' in kwargs else "PEAKCOUNT_STAT"
kwargs["SAMI_RESULTKEY"]=key
kwargs["SAMI_QASTATUSKEY"]=status
if "ReferenceMetrics" in kwargs:
r=kwargs["ReferenceMetrics"]
if key in r:
kwargs["REFERENCE"]=r[key]
if "PEAKCOUNT_WARN_RANGE" in parms and "PEAKCOUNT_NORMAL_RANGE" in parms:
kwargs["RANGES"]=[(np.asarray(parms["PEAKCOUNT_WARN_RANGE"]),QASeverity.WARNING),
(np.asarray(parms["PEAKCOUNT_NORMAL_RANGE"]),QASeverity.NORMAL)]# sorted by most severe to least severe
MonitoringAlg.__init__(self,name,fr,config,logger)
def run(self,*args,**kwargs):
if len(args) == 0 :
raise qlexceptions.ParameterException("Missing input parameter")
if not self.is_compatible(type(args[0])):
raise qlexceptions.ParameterException("Incompatible parameter type. Was expecting lvmspec.image.Image, got {}".format(type(args[0])))
fibermap=kwargs['FiberMap']
input_frame=args[0]
if "paname" not in kwargs:
paname=None
else:
paname=kwargs["paname"]
if "ReferenceMetrics" in kwargs: refmetrics=kwargs["ReferenceMetrics"]
else: refmetrics=None
amps=False
if "amps" in kwargs:
amps=kwargs["amps"]
if "param" in kwargs: param=kwargs["param"]
else: param=None
psf = None
if "PSFFile" in kwargs:
psf=kwargs["PSFFile"]
if "qlf" in kwargs:
qlf=kwargs["qlf"]
else: qlf=False
if "qafile" in kwargs: qafile = kwargs["qafile"]
else: qafile = None
if "qafig" in kwargs:
qafig=kwargs["qafig"]
else: qafig = None
return self.run_qa(fibermap,input_frame,paname=paname,amps=amps,psf=psf, qafile=qafile, qafig=qafig, param=param, qlf=qlf, refmetrics=refmetrics)
def run_qa(self,fibermap,frame,paname=None,amps=False,psf=None, qafile=None,qafig=None, param=None, qlf=False, refmetrics=None):
from lvmspec.qa.qalib import sky_peaks
retval={}
retval["PANAME"] = paname
retval["QATIME"] = datetime.datetime.now().isoformat()
retval["EXPID"] = '{0:08d}'.format(frame.meta["EXPID"])
retval["CAMERA"] = camera = frame.meta["CAMERA"]
retval["PROGRAM"] = frame.meta["PROGRAM"]
retval["FLAVOR"] = frame.meta["FLAVOR"]
retval["NIGHT"] = frame.meta["NIGHT"]
kwargs=self.config['kwargs']
ra = fibermap["RA_TARGET"]
dec = fibermap["DEC_TARGET"]
# Parameters
if param is None:
log.info("Param is None. Using default param instead")
from lvmspec.io import read_params
desi_params = read_params()
param = desi_params['qa']['skypeaks']['PARAMS']
# Run
nspec_counts, sky_counts = sky_peaks(param, frame, amps=amps)
rms_nspec = qalib.getrms(nspec_counts)
rms_skyspec = qalib.getrms(sky_counts)
sumcount_med_sky=[]
retval["PARAMS"] = param
if "REFERENCE" in kwargs:
retval['PARAMS']['PEAKCOUNT_REF']=kwargs["REFERENCE"]
# retval["METRICS"]={"RA":ra,"DEC":dec, "PEAKCOUNT":nspec_counts,"PEAKCOUNT_RMS":rms_nspec,"PEAKCOUNT_MED_SKY":sumcount_med_sky,"PEAKCOUNT_RMS_SKY":rms_skyspec}
sumcount_err='NORMAL'
retval["METRICS"]={"RA":ra,"DEC":dec, "PEAKCOUNT":nspec_counts,"PEAKCOUNT_RMS":rms_nspec,"PEAKCOUNT_MED_SKY":sumcount_med_sky,"PEAKCOUNT_RMS_SKY":rms_skyspec,"PEAKCOUNT_STAT":sumcount_err}
if qlf:
qlf_post(retval)
if qafile is not None:
outfile = qa.write_qa_ql(qafile,retval)
log.debug("Output QA data is in {}".format(outfile))
if qafig is not None:
from lvmspec.qa.qa_plots_ql import plot_sky_peaks
plot_sky_peaks(retval,qafig)
log.debug("Output QA fig {}".format(qafig))
return retval
def get_default_config(self):
return {}
class Calc_XWSigma(MonitoringAlg):
def __init__(self,name,config,logger=None):
if name is None or name.strip() == "":
name="XWSIGMA"
from lvmspec.image import Image as im
kwargs=config['kwargs']
parms=kwargs['param']
key=kwargs['refKey'] if 'refKey' in kwargs else "WSIGMA_MED_SKY"
status=kwargs['statKey'] if 'statKey' in kwargs else "XWSIGMA_STAT"
kwargs["SAMI_RESULTKEY"]=key
kwargs["SAMI_QASTATUSKEY"]=status
if "ReferenceMetrics" in kwargs:
r=kwargs["ReferenceMetrics"]
if key in r:
kwargs["REFERENCE"]=r[key]
if "XWSIGMA_WARN_RANGE" in parms and "XWSIGMA_NORMAL_RANGE" in parms:
kwargs["RANGES"]=[(np.asarray(parms["XWSIGMA_WARN_RANGE"]),QASeverity.WARNING),
(np.asarray(parms["XWSIGMA_NORMAL_RANGE"]),QASeverity.NORMAL)]# sorted by most severe to least severe
MonitoringAlg.__init__(self,name,im,config,logger)
def run(self,*args,**kwargs):
if len(args) == 0 :
raise qlexceptions.ParameterException("Missing input parameter")
if not self.is_compatible(type(args[0])):
raise qlexceptions.ParameterException("Incompatible parameter type. Was expecting lvmspec.image.Image got {}".format(type(args[0])))
fibermap=kwargs['FiberMap']
input_image=args[0]
if "paname" not in kwargs:
paname=None
else:
paname=kwargs["paname"]
if "ReferenceMetrics" in kwargs: refmetrics=kwargs["ReferenceMetrics"]
else: refmetrics=None
amps=False
if "amps" in kwargs:
amps=kwargs["amps"]
if "param" in kwargs: param=kwargs["param"]
else: param=None
psf = None
if "PSFFile" in kwargs:
psf=kwargs["PSFFile"]
fibermap = None
if "FiberMap" in kwargs:
fibermap=kwargs["FiberMap"]
if "qlf" in kwargs:
qlf=kwargs["qlf"]
else: qlf=False
if "qafile" in kwargs: qafile = kwargs["qafile"]
else: qafile = None
if "qafig" in kwargs: qafig=kwargs["qafig"]
else: qafig = None
return self.run_qa(fibermap,input_image,paname=paname,amps=amps,psf=psf, qafile=qafile,qafig=qafig, param=param, qlf=qlf, refmetrics=refmetrics)
def run_qa(self,fibermap,image,paname=None,amps=False,psf=None, qafile=None,qafig=None, param=None, qlf=False, refmetrics=None):
from scipy.optimize import curve_fit
retval={}
retval["PANAME"] = paname
retval["QATIME"] = datetime.datetime.now().isoformat()
retval["EXPID"] = '{0:08d}'.format(image.meta["EXPID"])
retval["CAMERA"] = camera = image.meta["CAMERA"]
retval["PROGRAM"] = image.meta["PROGRAM"]
retval["FLAVOR"] = image.meta["FLAVOR"]
retval["NIGHT"] = image.meta["NIGHT"]
kwargs=self.config['kwargs']
ra = fibermap["RA_TARGET"]
dec = fibermap["DEC_TARGET"]
if param is None:
log.debug("Param is None. Using default param instead")
if image.meta["FLAVOR"] == 'arc':
param = {
"B_PEAKS":[4047.7, 4359.6, 5087.2],
"R_PEAKS":[6144.8, 6508.3, 6600.8, 6718.9, 6931.4, 7034.4,],
"Z_PEAKS":[8379.9, 8497.7, 8656.8, 8783.0],
"XWSIGMA_NORMAL_RANGE":[-2.0, 2.0],
"XWSIGMA_WARN_RANGE":[-4.0, 4.0]
}
else:
param = {
"B_PEAKS":[3914.4, 5199.3, 5578.9],
"R_PEAKS":[6301.9, 6365.4, 7318.2, 7342.8, 7371.3],
"Z_PEAKS":[8401.5, 8432.4, 8467.5, 9479.4, 9505.6, 9521.8],
"XWSIGMA_NORMAL_RANGE":[-2.0, 2.0],
"XWSIGMA_WARN_RANGE":[-4.0, 4.0]
}
dw=2.
dp=3
b_peaks=param['B_PEAKS']
r_peaks=param['R_PEAKS']
z_peaks=param['Z_PEAKS']
if fibermap["OBJTYPE"][0] == 'ARC':
import lvmspec.psf
psf=lvmspec.psf.PSF(psf)
xsigma=[]
wsigma=[]
xsigma_sky=[]
wsigma_sky=[]
xsigma_amp1=[]
wsigma_amp1=[]
xsigma_amp2=[]
wsigma_amp2=[]
xsigma_amp3=[]
wsigma_amp3=[]
xsigma_amp4=[]
wsigma_amp4=[]
if fibermap['FIBER'].shape[0] >= 500:
fibers = 500
else:
fibers = fibermap['FIBER'].shape[0]
for i in range(fibers):
if camera[0]=="b":
peak_wave=np.array([b_peaks[0]-dw,b_peaks[0]+dw,b_peaks[1]-dw,b_peaks[1]+dw,b_peaks[2]-dw,b_peaks[2]+dw])
xpix=psf.x(ispec=i,wavelength=peak_wave)
ypix=psf.y(ispec=i,wavelength=peak_wave)
xpix_peak1=np.arange(int(round(xpix[0]))-dp,int(round(xpix[1]))+dp+1,1)
ypix_peak1=np.arange(int(round(ypix[0])),int(round(ypix[1])),1)
xpix_peak2=np.arange(int(round(xpix[2]))-dp,int(round(xpix[3]))+dp+1,1)
ypix_peak2=np.arange(int(round(ypix[2])),int(round(ypix[3])),1)
xpix_peak3=np.arange(int(round(xpix[4]))-dp,int(round(xpix[5]))+dp+1,1)
ypix_peak3=np.arange(int(round(ypix[4])),int(round(ypix[5])),1)
xpopt1,xpcov1=curve_fit(qalib.gauss,np.arange(len(xpix_peak1)),image.pix[int(np.mean(ypix_peak1)),xpix_peak1])
wpopt1,wpcov1=curve_fit(qalib.gauss,np.arange(len(ypix_peak1)),image.pix[ypix_peak1,int(np.mean(xpix_peak1))])
xpopt2,xpcov2=curve_fit(qalib.gauss,np.arange(len(xpix_peak2)),image.pix[int(np.mean(ypix_peak2)),xpix_peak2])
wpopt2,wpcov2=curve_fit(qalib.gauss,np.arange(len(ypix_peak2)),image.pix[ypix_peak2,int(np.mean(xpix_peak2))])
xpopt3,xpcov3=curve_fit(qalib.gauss,np.arange(len(xpix_peak3)),image.pix[int(np.mean(ypix_peak3)),xpix_peak3])
wpopt3,wpcov3=curve_fit(qalib.gauss,np.arange(len(ypix_peak3)),image.pix[ypix_peak3,int(np.mean(xpix_peak3))])
xsigma1=np.abs(xpopt1[2])
wsigma1=np.abs(wpopt1[2])
xsigma2=np.abs(xpopt2[2])
wsigma2=np.abs(wpopt2[2])
xsigma3=np.abs(xpopt3[2])
wsigma3=np.abs(wpopt3[2])
xsig=np.array([xsigma1,xsigma2,xsigma3])
wsig=np.array([wsigma1,wsigma2,wsigma3])
xsigma_avg=np.mean(xsig)
wsigma_avg=np.mean(wsig)
xsigma.append(xsigma_avg)
wsigma.append(wsigma_avg)
if camera[0]=="r":
peak_wave=np.array([r_peaks[0]-dw,r_peaks[0]+dw,r_peaks[1]-dw,r_peaks[1]+dw,r_peaks[2]-dw,r_peaks[2]+dw,r_peaks[3]-dw,r_peaks[3]+dw,r_peaks[4]-dw,r_peaks[4]+dw])
xpix=psf.x(ispec=i,wavelength=peak_wave)
ypix=psf.y(ispec=i,wavelength=peak_wave)
xpix_peak1=np.arange(int(round(xpix[0]))-dp,int(round(xpix[1]))+dp+1,1)
ypix_peak1=np.arange(int(round(ypix[0])),int(round(ypix[1])),1)
xpix_peak2=np.arange(int(round(xpix[2]))-dp,int(round(xpix[3]))+dp+1,1)
ypix_peak2=np.arange(int(round(ypix[2])),int(round(ypix[3])),1)
xpix_peak3=np.arange(int(round(xpix[4]))-dp,int(round(xpix[5]))+dp+1,1)
ypix_peak3=np.arange(int(round(ypix[4])),int(round(ypix[5])),1)
xpix_peak4=np.arange(int(round(xpix[6]))-dp,int(round(xpix[7]))+dp+1,1)
ypix_peak4=np.arange(int(round(ypix[6])),int(round(ypix[7])),1)
xpix_peak5=np.arange(int(round(xpix[8]))-dp,int(round(xpix[9]))+dp+1,1)
ypix_peak5=np.arange(int(round(ypix[8])),int(round(ypix[9])),1)
xpopt1,xpcov1=curve_fit(qalib.gauss,np.arange(len(xpix_peak1)),image.pix[int(np.mean(ypix_peak1)),xpix_peak1])
wpopt1,wpcov1=curve_fit(qalib.gauss,np.arange(len(ypix_peak1)),image.pix[ypix_peak1,int(np.mean(xpix_peak1))])
xpopt2,xpcov2=curve_fit(qalib.gauss,np.arange(len(xpix_peak2)),image.pix[int(np.mean(ypix_peak2)),xpix_peak2])
wpopt2,wpcov2=curve_fit(qalib.gauss,np.arange(len(ypix_peak2)),image.pix[ypix_peak2,int(np.mean(xpix_peak2))])
xpopt3,xpcov3=curve_fit(qalib.gauss,np.arange(len(xpix_peak3)),image.pix[int(np.mean(ypix_peak3)),xpix_peak3])
wpopt3,wpcov3=curve_fit(qalib.gauss,np.arange(len(ypix_peak3)),image.pix[ypix_peak3,int(np.mean(xpix_peak3))])
xpopt4,xpcov4=curve_fit(qalib.gauss,np.arange(len(xpix_peak4)),image.pix[int(np.mean(ypix_peak4)),xpix_peak4])
wpopt4,wpcov4=curve_fit(qalib.gauss,np.arange(len(ypix_peak4)),image.pix[ypix_peak4,int(np.mean(xpix_peak4))])
xpopt5,xpcov5=curve_fit(qalib.gauss,np.arange(len(xpix_peak5)),image.pix[int(np.mean(ypix_peak5)),xpix_peak5])
wpopt5,wpcov5=curve_fit(qalib.gauss,np.arange(len(ypix_peak5)),image.pix[ypix_peak5,int(np.mean(xpix_peak5))])
xsigma1=np.abs(xpopt1[2])
wsigma1=np.abs(wpopt1[2])
xsigma2=np.abs(xpopt2[2])
wsigma2=np.abs(wpopt2[2])
xsigma3=np.abs(xpopt3[2])
wsigma3=np.abs(wpopt3[2])
xsigma4=np.abs(xpopt4[2])
wsigma4=np.abs(wpopt4[2])
xsigma5=np.abs(xpopt5[2])
wsigma5=np.abs(wpopt5[2])
xsig=np.array([xsigma1,xsigma2,xsigma3,xsigma4,xsigma5])
wsig=np.array([wsigma1,wsigma2,wsigma3,wsigma4,wsigma5])
xsigma_avg=np.mean(xsig)
wsigma_avg=np.mean(wsig)
xsigma.append(xsigma_avg)
wsigma.append(wsigma_avg)
if camera[0]=="z":
peak_wave=np.array([z_peaks[0]-dw,z_peaks[0]+dw,z_peaks[1]-dw,z_peaks[1]+dw,z_peaks[2]-dw,z_peaks[2]+dw,z_peaks[3]-dw,z_peaks[3]+dw])
xpix=psf.x(ispec=i,wavelength=peak_wave)
ypix=psf.y(ispec=i,wavelength=peak_wave)
xpix_peak1=np.arange(int(round(xpix[0]))-dp,int(round(xpix[1]))+dp+1,1)
ypix_peak1=np.arange(int(round(ypix[0])),int(round(ypix[1])),1)
xpix_peak2=np.arange(int(round(xpix[2]))-dp,int(round(xpix[3]))+dp+1,1)
ypix_peak2=np.arange(int(round(ypix[2])),int(round(ypix[3])),1)
xpix_peak3=np.arange(int(round(xpix[4]))-dp,int(round(xpix[5]))+dp+1,1)
ypix_peak3=np.arange(int(round(ypix[4])),int(round(ypix[5])),1)
xpix_peak4=np.arange(int(round(xpix[6]))-dp,int(round(xpix[7]))+dp+1,1)
ypix_peak4=np.arange(int(round(ypix[6])),int(round(ypix[7])),1)
xpopt1,xpcov1=curve_fit(qalib.gauss,np.arange(len(xpix_peak1)),image.pix[int(np.mean(ypix_peak1)),xpix_peak1])
wpopt1,wpcov1=curve_fit(qalib.gauss,np.arange(len(ypix_peak1)),image.pix[ypix_peak1,int(np.mean(xpix_peak1))])
xpopt2,xpcov2=curve_fit(qalib.gauss,np.arange(len(xpix_peak2)),image.pix[int(np.mean(ypix_peak2)),xpix_peak2])
wpopt2,wpcov2=curve_fit(qalib.gauss,np.arange(len(ypix_peak2)),image.pix[ypix_peak2,int(np.mean(xpix_peak2))])
xpopt3,xpcov3=curve_fit(qalib.gauss,np.arange(len(xpix_peak3)),image.pix[int(np.mean(ypix_peak3)),xpix_peak3])
wpopt3,wpcov3=curve_fit(qalib.gauss,np.arange(len(ypix_peak3)),image.pix[ypix_peak3,int(np.mean(xpix_peak3))])
xpopt4,xpcov4=curve_fit(qalib.gauss,np.arange(len(xpix_peak4)),image.pix[int(np.mean(ypix_peak4)),xpix_peak4])
wpopt4,wpcov4=curve_fit(qalib.gauss,np.arange(len(ypix_peak4)),image.pix[ypix_peak4,int(np.mean(xpix_peak4))])
xsigma1=np.abs(xpopt1[2])
wsigma1=np.abs(wpopt1[2])
xsigma2=np.abs(xpopt2[2])
wsigma2=np.abs(wpopt2[2])
xsigma3=np.abs(xpopt3[2])
wsigma3=np.abs(wpopt3[2])
xsigma4=np.abs(xpopt4[2])
wsigma4=np.abs(wpopt4[2])
xsig=np.array([xsigma1,xsigma2,xsigma3,xsigma4])
wsig=np.array([wsigma1,wsigma2,wsigma3,wsigma4])
xsigma_avg=np.mean(xsig)
wsigma_avg=np.mean(wsig)
xsigma.append(xsigma_avg)
wsigma.append(wsigma_avg)
if fibermap['OBJTYPE'][i]=='SKY':
xsigma_sky=xsigma
wsigma_sky=wsigma
if amps:
if fibermap['FIBER'][i]<240:
if camera[0]=="b":
xsig_amp1=np.array([xsigma1])
xsig_amp3=np.array([xsigma2,xsigma3])
wsig_amp1=np.array([wsigma1])
wsig_amp3=np.array([wsigma2,wsigma3])
if camera[0]=="r":
xsig_amp1=np.array([xsigma1,xsigma2])
xsig_amp3=np.array([xsigma3,xsigma4,xsigma5])
wsig_amp1=np.array([wsigma1,wsigma2])
wsig_amp3=np.array([wsigma3,wsigma4,wsigma5])
if camera[0]=="z":
xsig_amp1=np.array([xsigma1,xsigma2,xsigma3])
xsig_amp3=np.array([xsigma4])
wsig_amp1=np.array([wsigma1,wsigma2,wsigma3])
wsig_amp3=np.array([wsigma4])
xsigma_amp1.append(xsig_amp1)
wsigma_amp1.append(wsig_amp1)
xsigma_amp3.append(xsig_amp3)
wsigma_amp3.append(wsig_amp3)
if fibermap['FIBER'][i]>260:
if camera[0]=="b":
xsig_amp2=np.array([xsigma1])
xsig_amp4=np.array([xsigma2,xsigma3])
wsig_amp2=np.array([wsigma1])
wsig_amp4=np.array([wsigma2,wsigma3])
if camera[0]=="r":
xsig_amp2=np.array([xsigma1,xsigma2])
xsig_amp4=np.array([xsigma3,xsigma4,xsigma5])
wsig_amp2=np.array([wsigma1,wsigma2])
wsig_amp4=np.array([wsigma3,wsigma4,wsigma5])
if camera[0]=="z":
xsig_amp2=np.array([xsigma1,xsigma2,xsigma3])
xsig_amp4=np.array([xsigma4])
wsig_amp2=np.array([wsigma1,wsigma2,wsigma3])
wsig_amp4=np.array([wsigma4])
xsigma_amp2.append(xsig_amp2)
wsigma_amp2.append(wsig_amp2)
xsigma_amp4.append(xsig_amp4)
wsigma_amp4.append(wsig_amp4)
if fibermap['FIBER'].shape[0]<260:
xsigma_amp2=np.zeros(len(xsigma))
xsigma_amp4=np.zeros(len(xsigma))
wsigma_amp2=np.zeros(len(wsigma))
wsigma_amp4=np.zeros(len(wsigma))
xsigma=np.array(xsigma)
wsigma=np.array(wsigma)
xsigma_med=np.median(xsigma)
wsigma_med=np.median(wsigma)
xsigma_med_sky=np.median(xsigma_sky)
wsigma_med_sky=np.median(wsigma_sky)
xwsigma=np.array([xsigma_med_sky,wsigma_med_sky])
xamp1_med=np.median(xsigma_amp1)
xamp2_med=np.median(xsigma_amp2)
xamp3_med=np.median(xsigma_amp3)
xamp4_med=np.median(xsigma_amp4)
wamp1_med=np.median(wsigma_amp1)
wamp2_med=np.median(wsigma_amp2)
wamp3_med=np.median(wsigma_amp3)
wamp4_med=np.median(wsigma_amp4)
xsigma_amp=np.array([xamp1_med,xamp2_med,xamp3_med,xamp4_med])
wsigma_amp=np.array([wamp1_med,wamp2_med,wamp3_med,wamp4_med])
xshift=0.0
wshift=0.0
xshift_fib=[]
wshift_fib=[]
xshift_amp=[]
wshift_amp=[]
shift_warn=[]
retval["PARAMS"] = param
if "REFERENCE" in kwargs:
retval['PARAMS']['XWSIGMA_REF']=kwargs["REFERENCE"]
shift_err='NORMAL'
if amps:
# retval["METRICS"]={"RA":ra,"DEC":dec, "XSIGMA":xsigma,"XSIGMA_MED":xsigma_med,"XSIGMA_AMP":xsigma_amp,"XSIGMA_MED_SKY":xsigma_med_sky,"XSHIFT":xshift,"XSHIFT_FIB":xshift_fib,"XSHIFT_AMP":xshift_amp,"WSIGMA":wsigma,"WSIGMA_MED":wsigma_med,"WSIGMA_AMP":wsigma_amp,"WSIGMA_MED_SKY":wsigma_med_sky,"WSHIFT":wshift,"WSHIFT_FIB":wshift_fib,"WSHIFT_AMP":wshift_amp,"XWSIGMA":xwsigma}
retval["METRICS"]={"RA":ra,"DEC":dec, "XSIGMA":xsigma,"XSIGMA_MED":xsigma_med,"XSIGMA_AMP":xsigma_amp,"XSHIFT":xshift,"XSHIFT_FIB":xshift_fib,"XSHIFT_AMP":xshift_amp,"WSIGMA":wsigma,"WSIGMA_MED":wsigma_med,"WSIGMA_AMP":wsigma_amp,"WSHIFT":wshift,"WSHIFT_FIB":wshift_fib,"WSHIFT_AMP":wshift_amp,"XWSIGMA":xwsigma,"XWSIGMA_STAT":shift_err}
else:
# retval["METRICS"]={"RA":ra,"DEC":dec, "XSIGMA":xsigma,"XSIGMA_MED":xsigma_med,"XSIGMA_MED_SKY":xsigma_med_sky,"XSHIFT":xshift,"XSHIFT_FIB":xshift_fib,"WSIGMA":wsigma,"WSIGMA_MED":wsigma_med,"WSIGMA_MED_SKY":wsigma_med_sky,"WSHIFT":wshift,"WSHIFT_FIB":wshift_fib,"XWSIGMA":xwsigma}
retval["METRICS"]={"RA":ra,"DEC":dec, "XSIGMA":xsigma,"XSIGMA_MED":xsigma_med,"XSIGMA_MED_SKY":xsigma_med_sky,"XSHIFT":xshift,"XSHIFT_FIB":xshift_fib,"WSIGMA":wsigma,"WSIGMA_MED":wsigma_med,"WSIGMA_MED_SKY":wsigma_med_sky,"WSHIFT":wshift,"WSHIFT_FIB":wshift_fib,"XWSIGMA_STAT":shift_err}
#- http post if needed
if qlf:
qlf_post(retval)
if qafile is not None:
outfile = qa.write_qa_ql(qafile,retval)
log.debug("Output QA data is in {}".format(outfile))
if qafig is not None:
from lvmspec.qa.qa_plots_ql import plot_XWSigma
plot_XWSigma(retval,qafig)
log.debug("Output QA fig {}".format(qafig))
return retval
def get_default_config(self):
return {}
class Bias_From_Overscan(MonitoringAlg):
def __init__(self,name,config,logger=None):
if name is None or name.strip() == "":
name="BIAS_OVERSCAN"
import astropy
rawtype=astropy.io.fits.hdu.hdulist.HDUList
kwargs=config['kwargs']
parms=kwargs['param']
key=kwargs['refKey'] if 'refKey' in kwargs else "BIAS_AMP"
status=kwargs['statKey'] if 'statKey' in kwargs else "BIAS_STAT"
kwargs["SAMI_RESULTKEY"]=key
kwargs["SAMI_QASTATUSKEY"]=status
if "ReferenceMetrics" in kwargs:
r=kwargs["ReferenceMetrics"]
if key in r:
kwargs["REFERENCE"]=r[key]
if "BIAS_WARN_RANGE" in parms and "BIAS_NORMAL_RANGE" in parms:
kwargs["RANGES"]=[(np.asarray(parms["BIAS_WARN_RANGE"]),QASeverity.WARNING),
(np.asarray(parms["BIAS_NORMAL_RANGE"]),QASeverity.NORMAL)]# sorted by most severe to least severe
MonitoringAlg.__init__(self,name,rawtype,config,logger)
def run(self,*args,**kwargs):
if len(args) == 0 :
raise qlexceptions.ParameterException("Missing input parameter")
if not self.is_compatible(type(args[0])):
raise qlexceptions.ParameterException("Incompatible input. Was expecting {} got {}".format(type(self.__inpType__),type(args[0])))
input_raw=args[0]
camera=kwargs["camera"]
paname=None
if "paname" in kwargs:
paname=kwargs["paname"]
if "ReferenceMetrics" in kwargs: refmetrics=kwargs["ReferenceMetrics"]
else: refmetrics=None
amps=False
if "amps" in kwargs:
amps=kwargs["amps"]
if "param" in kwargs: param=kwargs["param"]
else: param=None
if "qlf" in kwargs:
qlf=kwargs["qlf"]
else: qlf=False
if "qafile" in kwargs: qafile = kwargs["qafile"]
else: qafile = None
if "qafig" in kwargs: qafig=kwargs["qafig"]
else: qafig=None
return self.run_qa(input_raw,camera,paname=paname,amps=amps, qafile=qafile,qafig=qafig, param=param, qlf=qlf, refmetrics=refmetrics)
def run_qa(self,raw,camera,paname=None,amps=False,qafile=None,qafig=None, param=None, qlf=False, refmetrics=None):
rawimage=raw[camera.upper()].data
header=raw[camera.upper()].header
retval={}
retval["EXPID"]= '{0:08d}'.format(header["EXPID"])
retval["CAMERA"] = camera
retval["PANAME"] = paname
retval["QATIME"] = datetime.datetime.now().isoformat()
retval["FLAVOR"] = header["FLAVOR"]
if retval["FLAVOR"] == 'arc':
pass
else:
retval["PROGRAM"] = header["PROGRAM"]
retval["NIGHT"] = header["NIGHT"]
kwargs=self.config['kwargs']
rawimage=raw[camera.upper()].data
header=raw[camera.upper()].header
if 'INHERIT' in header and header['INHERIT']:
h0 = raw[0].header
for key in h0:
if key not in header:
header[key] = h0[key]
data=[]
row_data_amp1=[]
row_data_amp2=[]
row_data_amp3=[]
row_data_amp4=[]
bias_overscan=[]
for kk in ['1','2','3','4']:
from lvmspec.preproc import _parse_sec_keyword
sel=_parse_sec_keyword(header['BIASSEC'+kk])
#- Obtain counts/second in bias region
pixdata=rawimage[sel]/header["EXPTIME"]
if kk == '1':
for i in range(pixdata.shape[0]):
row_amp1=pixdata[i]
row_data_amp1.append(row_amp1)
if kk == '2':
for i in range(pixdata.shape[0]):
row_amp2=pixdata[i]
row_data_amp2.append(row_amp2)
if kk == '3':
for i in range(pixdata.shape[0]):
row_amp3=pixdata[i]
row_data_amp3.append(row_amp3)
if kk == '4':
for i in range(pixdata.shape[0]):
row_amp4=pixdata[i]
row_data_amp4.append(row_amp4)
#- Compute statistics of the bias region that only reject
# the 0.5% of smallest and largest values. (from sdssproc)
isort=np.sort(pixdata.ravel())
nn=isort.shape[0]
bias=np.mean(isort[int(0.005*nn) : int(0.995*nn)])
bias_overscan.append(bias)
data.append(isort)
row_data_bottom=[]
row_data_top=[]
for i in range(len(row_data_amp1)):
row_data_lower=np.concatenate((row_data_amp1[i],row_data_amp2[i]))
row_data_upper=np.concatenate((row_data_amp3[i],row_data_amp4[i]))
row_data_bottom.append(row_data_lower)
row_data_top.append(row_data_upper)
row_data=np.concatenate((row_data_bottom,row_data_top))
mean_row=[]
for i in range(len(row_data)):
mean=np.mean(row_data[i])
mean_row.append(mean)
full_data=np.concatenate((data[0],data[1],data[2],data[3])).ravel()
bias=np.mean(bias_overscan)
if param is None:
log.debug("Param is None. Using default param instead")
param = {
"PERCENTILES":[68.2,95.4,99.7],
"BIAS_NORMAL_RANGE":[-1.0, 1.0],
"BIAS_WARN_RANGE:":[-2.0, 2.0]
}
sig1_lo = np.percentile(full_data,(100.-param['PERCENTILES'][0])/2.)
sig1_hi = np.percentile(full_data,100.-(100.-param['PERCENTILES'][0])/2.)
sig2_lo = np.percentile(full_data,(100.-param['PERCENTILES'][1])/2.)
sig2_hi = np.percentile(full_data,100.-(100.-param['PERCENTILES'][1])/2.)
sig3_lo = np.percentile(full_data,(100.-param['PERCENTILES'][2])/2.)
sig3_hi = np.percentile(full_data,100.-(100.-param['PERCENTILES'][2])/2.)
diff1sig = sig1_hi - sig1_lo
diff2sig = sig2_hi - sig2_lo
diff3sig = sig3_hi - sig3_lo
sig5_value = np.percentile(full_data,100.-99.99994)
data5sig = len(np.where(full_data <= sig5_value)[0])
retval["PARAMS"] = param
if "REFERENCE" in kwargs:
retval['PARAMS']['BIAS_AMP_REF']=kwargs["REFERENCE"]
biasdiff_err='NORMAL'
if amps:
bias_amps=np.array(bias_overscan)
# retval["METRICS"]={'BIAS':bias,'BIAS_AMP':bias_amps,"DIFF1SIG":diff1sig,"DIFF2SIG":diff2sig,"DIFF3SIG":diff3sig,"DATA5SIG":data5sig,"MEANBIAS_ROW":mean_row}
retval["METRICS"]={'BIAS':bias,'BIAS_AMP':bias_amps,"DIFF1SIG":diff1sig,"DIFF2SIG":diff2sig,"DIFF3SIG":diff3sig,"DATA5SIG":data5sig,"MEANBIAS_ROW":mean_row,"BIAS_STAT":biasdiff_err}
else:
# retval["METRICS"]={'BIAS':bias,"DIFF1SIG":diff1sig,"DIFF2SIG":diff2sig,"DIFF3SIG":diff3sig,"DATA5SIG":data5sig,"MEANBIAS_ROW":mean_row}
retval["METRICS"]={'BIAS':bias,"DIFF1SIG":diff1sig,"DIFF2SIG":diff2sig,"DIFF3SIG":diff3sig,"DATA5SIG":data5sig,"MEANBIAS_ROW":mean_row,"BIAS_STAT":biasdiff_err}
#- http post if needed
if qlf:
qlf_post(retval)
if qafile is not None:
outfile = qa.write_qa_ql(qafile,retval)
log.debug("Output QA data is in {}".format(outfile))
if qafig is not None:
from lvmspec.qa.qa_plots_ql import plot_bias_overscan
plot_bias_overscan(retval,qafig)
log.debug("Output QA fig {}".format(qafig))
return retval
def get_default_config(self):
return {}
class CountSpectralBins(MonitoringAlg):
def __init__(self,name,config,logger=None):
if name is None or name.strip() == "":
name="COUNTBINS"
from lvmspec.frame import Frame as fr
kwargs=config['kwargs']
parms=kwargs['param']
key=kwargs['refKey'] if 'refKey' in kwargs else "NGOODFIB"
status=kwargs['statKey'] if 'statKey' in kwargs else "NGOODFIB_STAT"
kwargs["SAMI_RESULTKEY"]=key
kwargs["SAMI_QASTATUSKEY"]=status
if "ReferenceMetrics" in kwargs:
r=kwargs["ReferenceMetrics"]
if key in r:
kwargs["REFERENCE"]=r[key]
if "NGOODFIB_WARN_RANGE" in parms and "NGOODFIB_NORMAL_RANGE" in parms:
kwargs["RANGES"]=[( | np.asarray(parms["NGOODFIB_WARN_RANGE"]) | numpy.asarray |
"""Toy cluttered table domain.
This environment is created to test our planner's ability to handle
failures reported by the environment.
"""
from typing import Dict, List, Optional, Sequence, Set
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from gym.spaces import Box
from predicators.src import utils
from predicators.src.envs import BaseEnv
from predicators.src.settings import CFG
from predicators.src.structs import Action, Array, GroundAtom, Object, \
ParameterizedOption, Predicate, State, Task, Type
class ClutteredTableEnv(BaseEnv):
"""Toy cluttered table domain."""
def __init__(self) -> None:
super().__init__()
# Types
self._can_type = Type(
"can", ["pose_x", "pose_y", "radius", "is_grasped", "is_trashed"])
# Predicates
self._HandEmpty = Predicate("HandEmpty", [], self._HandEmpty_holds)
self._Holding = Predicate("Holding", [self._can_type],
self._Holding_holds)
self._Untrashed = Predicate("Untrashed", [self._can_type],
self._Untrashed_holds)
# Options
self._Grasp = utils.SingletonParameterizedOption(
"Grasp",
self._Grasp_policy,
types=[self._can_type],
params_space=Box(0, 1, (4, )))
self._Dump = utils.SingletonParameterizedOption(
"Dump", self._Dump_policy)
@classmethod
def get_name(cls) -> str:
return "cluttered_table"
def simulate(self, state: State, action: Action) -> State:
assert self.action_space.contains(action.arr)
next_state = state.copy()
# Figure out which can is currently grasped, if any.
grasped_can = None
for can in state:
if state.get(can, "is_grasped") > 0.5:
assert grasped_can is None, "Multiple cans grasped?"
assert state.get(can, "is_trashed") < 0.5, \
"Grasped a can that has been trashed?"
grasped_can = can
if np.all(action.arr == 0.0):
# Handle dumping action.
if grasped_can is not None:
next_state.set(grasped_can, "pose_x", -999)
next_state.set(grasped_can, "pose_y", -999)
next_state.set(grasped_can, "is_grasped", 0.0)
next_state.set(grasped_can, "is_trashed", 1.0)
return next_state
# Handle grasping action.
if grasped_can is not None:
return next_state # can't grasp while already grasping
start_x, start_y, end_x, end_y = action.arr
desired_can = None
for can in state:
this_x = state.get(can, "pose_x")
this_y = state.get(can, "pose_y")
this_radius = state.get(can, "radius")
if np.linalg.norm([end_x - this_x, end_y - this_y]) < this_radius:
assert desired_can is None
desired_can = can
if desired_can is None:
return next_state # end point wasn't at any can
self._check_collisions(start_x, start_y, end_x, end_y, state,
desired_can)
# No collisions, update state and return.
next_state.set(desired_can, "is_grasped", 1.0)
return next_state
def _generate_train_tasks(self) -> List[Task]:
return self._get_tasks(num=CFG.num_train_tasks, train_or_test="train")
def _generate_test_tasks(self) -> List[Task]:
return self._get_tasks(num=CFG.num_test_tasks, train_or_test="test")
@property
def predicates(self) -> Set[Predicate]:
return {self._HandEmpty, self._Holding, self._Untrashed}
@property
def goal_predicates(self) -> Set[Predicate]:
return {self._Holding}
@property
def types(self) -> Set[Type]:
return {self._can_type}
@property
def options(self) -> Set[ParameterizedOption]:
return {self._Grasp, self._Dump}
@property
def action_space(self) -> Box:
# The action_space is 4-dimensional. The first two dimensions are the
# start point of the vector corresponding to the grasp approach. The
# last two dimensions are the end point. Dumping is a special action
# where all 4 dimensions are 0.
return Box(0, 1, (4, ))
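# Editor's illustration of the encoding described above (coordinates are
# arbitrary example values, not part of the original file):
#   grasp = Action(np.array([0.10, 0.10, 0.52, 0.48], dtype=np.float32))  # approach start -> end
#   dump  = Action(np.zeros(4, dtype=np.float32))                         # special all-zeros action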
def render_state_plt(
self,
state: State,
task: Task,
action: Optional[Action] = None,
caption: Optional[str] = None) -> matplotlib.figure.Figure:
fig, ax = plt.subplots(1, 1)
ax.set_aspect('equal')
assert len(task.goal) == 1
goal_atom = next(iter(task.goal))
assert goal_atom.predicate == self._Holding
assert len(goal_atom.objects) == 1
goal_can = goal_atom.objects[0]
# Draw cans
lw = 1
goal_color = "green"
other_color = "red"
lcolor = "black"
for can in state:
if state.get(can, "is_grasped"):
circ = plt.Circle(
(state.get(can, "pose_x"), state.get(can, "pose_y")),
1.75 * state.get(can, "radius"),
facecolor="gray",
alpha=0.5)
ax.add_patch(circ)
if can == goal_can:
c = goal_color
else:
c = other_color
circ = plt.Circle(
(state.get(can, "pose_x"), state.get(can, "pose_y")),
state.get(can, "radius"),
linewidth=lw,
edgecolor=lcolor,
facecolor=c)
ax.add_patch(circ)
# Draw action
if action:
start_x, start_y, end_x, end_y = action.arr
dx, dy = end_x - start_x, end_y - start_y
arrow = plt.Arrow(start_x, start_y, dx, dy, width=0.1)
ax.add_patch(arrow)
plt.xlim(-0.1, 1.1)
plt.ylim(-0.1, 1.1)
plt.xticks([])
plt.yticks([])
if caption is not None:
plt.suptitle(caption, wrap=True)
plt.tight_layout()
return fig
def _get_tasks(self, num: int, train_or_test: str) -> List[Task]:
tasks = []
cans = []
for i in range(
max(CFG.cluttered_table_num_cans_train,
CFG.cluttered_table_num_cans_test)):
cans.append(Object(f"can{i}", self._can_type))
goal = {GroundAtom(self._Holding, [cans[0]])}
for _ in range(num):
tasks.append(
Task(self._create_initial_state(cans, train_or_test), goal))
return tasks
def _create_initial_state(self, cans: List[Object],
train_or_test: str) -> State:
data: Dict[Object, Array] = {}
assert train_or_test in ("train", "test")
if train_or_test == "train":
num_cans = CFG.cluttered_table_num_cans_train
rng = self._train_rng
elif train_or_test == "test":
num_cans = CFG.cluttered_table_num_cans_test
rng = self._test_rng
radius = CFG.cluttered_table_can_radius
for i in range(num_cans):
can = cans[i]
while True:
# keep cans near center of table to allow grasps from all angles
pose = np.array(rng.uniform(0.25, 0.75, size=2),
dtype=np.float32)
if not self._any_intersection(pose, radius, data):
break
# [pose_x, pose_y, radius, is_grasped, is_trashed]
data[can] = np.array([pose[0], pose[1], radius, 0.0, 0.0])
return State(data)
@staticmethod
def _HandEmpty_holds(state: State, objects: Sequence[Object]) -> bool:
assert not objects
for can in state:
if state.get(can, "is_grasped") > 0.5:
return False
return True
@staticmethod
def _Holding_holds(state: State, objects: Sequence[Object]) -> bool:
can, = objects
return state.get(can, "is_grasped") > 0.5
@staticmethod
def _Untrashed_holds(state: State, objects: Sequence[Object]) -> bool:
can, = objects
return state.get(can, "is_trashed") < 0.5
@staticmethod
def _Grasp_policy(state: State, memory: Dict, objects: Sequence[Object],
params: Array) -> Action:
del state, memory, objects # unused
return Action(params) # action is simply the parameter
@staticmethod
def _Dump_policy(state: State, memory: Dict, objects: Sequence[Object],
params: Array) -> Action:
del state, memory, objects, params # unused
return Action(np.zeros(4,
dtype=np.float32)) # no parameter for dumping
@staticmethod
def _any_intersection(pose: Array, radius: float,
data: Dict[Object, Array]) -> bool:
for other in data:
other_feats = data[other]
other_x = other_feats[0]
other_y = other_feats[1]
other_radius = other_feats[2]
distance = np.linalg.norm([other_x - pose[0], other_y - pose[1]])
if distance <= (radius + other_radius):
return True
return False
@staticmethod
def _check_collisions(start_x: float,
start_y: float,
end_x: float,
end_y: float,
state: State,
ignored_can: Optional[Object] = None) -> None:
"""Handle collision checking.
We'll just threshold the angle between the grasp approach vector
and the vector between (end_x, end_y) and any other can. Doing
an actually correct geometric computation would involve the
radii somehow, but we don't really care about this. The argument
ignored_can is a can with which we don't care about colliding.
This is generally the desired can, but when attempting to place
a can, could also be the grasped can.
"""
vec1 = np.array([end_x - start_x, end_y - start_y])
colliding_can = None
colliding_can_max_dist = float("-inf")
for can in state:
if can == ignored_can:
continue
this_x = state.get(can, "pose_x")
this_y = state.get(can, "pose_y")
vec2 = np.array([end_x - this_x, end_y - this_y])
angle = np.arccos(
np.clip(
vec1.dot(vec2) /
(np.linalg.norm(vec1) * | np.linalg.norm(vec2) | numpy.linalg.norm |
#! /usr/bin/env python
# -*- coding:utf8 -*-
#
# utils_io.py
#
# This file is part of pyplanes, a software distributed under the MIT license.
# For any question, please contact one of the authors cited below.
#
# Copyright (c) 2020
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
import socket
import datetime
import time
import numpy as np
import pyvtk
from matplotlib import cm
import matplotlib.pyplot as plt
import matplotlib.tri as mtri
from pyPLANES.fem.fem_entities_surfacic import *
from pyPLANES.fem.fem_entities_volumic import *
# def initialisation_out_files_plain(self):
# pass
from pymls import from_yaml, Solver, Layer, backing
from mediapack import Air, Fluid
def load_material(mat):
if mat == "Air":
Air_mat = Air()
return Fluid(c=Air_mat.c,rho=Air_mat.rho)
else:
return from_yaml("materials/" + mat + ".yaml")
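# Editor's illustration (the material name is a placeholder; the YAML file must
# exist under materials/ for the call to succeed):
#   foam = load_material("melamine")   # -> pymls material read from materials/melamine.yaml
#   air = load_material("Air")         # -> Fluid built from mediapack's Air constants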
def result_pymls(**kwargs):
name_project = kwargs.get("name_project", "unnamed_project")
ml = kwargs.get("ml", False)
termination = kwargs.get("termination", "rigid")
theta_d = kwargs.get("theta_d", 45)
freq = kwargs.get("frequencies", np.array([440]))
plot_RT = kwargs.get("plot_RT", False)
solver = Solver()
for _l in ml:
mat = load_material(_l[0])
solver.layers.append(Layer(mat, _l[1]))
R = []
if termination in ["rigid", "Rigid", "Rigid Wall", "Wall"]:
solver.backing = backing.rigid
T = False
else:
T = []
solver.backing = backing.transmission
for _f in freq:
_ = solver.solve(_f, theta_d)
R.append(_["R"][0])
if termination == "transmission":
T.append(_["T"][0])
if plot_RT:
plt.figure(name_project + "/ Reflection coefficient")
plt.plot(freq, [_.real for _ in R], 'r',label="Re(R) pymls")
plt.plot(freq, [_.imag for _ in R], 'b',label="Im(R) pymls")
plt.legend()
if T is not False:
plt.figure(name_project + "/ Transmission coefficient")
plt.plot(freq, [_.real for _ in T], 'r',label="Re(T) pymls")
plt.plot(freq, [_.imag for _ in T], 'b',label="Im(T) pymls")
plt.legend()
return freq, R, T
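# Editor's sketch of a typical call (material names and thicknesses are
# placeholders, not values taken from this project):
#   freq, R, T = result_pymls(name_project="demo",
#                             ml=[("melamine", 0.02), ("Air", 0.01)],
#                             termination="transmission",
#                             theta_d=30,
#                             frequencies=np.linspace(100., 2000., 50),
#                             plot_RT=False)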
def close_out_files(self):
duration = time.time()-self.start_time
self.info_file.write("Calculus ended at %s.\n"%(datetime.datetime.now()))
self.info_file.write("Total duration = {} s\n".format(duration))
self.info_file.write("duration / freq (averaged) = {} s\n".format(duration/len(self.frequencies)))
self.out_file.close()
self.info_file.close()
def print_entities(self):
for _ in self.entities:
print(_)
def print_elements(self):
for _ in self.elements[1:]:
print(_)
def print_vertices(self):
for _ in self.vertices[1:]:
print(_)
def print_edges(self):
for _ in self.edges:
print(_)
def print_faces(self):
for _ in self.faces:
print(_)
def print_model_entities(self):
for _ in self.model_entities:
print(_)
def print_reference_elements(self):
print(self.reference_elements)
def plot_fem_solution(self, kx=0.):
if self.plot[5]: # Determination of the maximum value of the pressure
p_max = 0
p_min = 1e308
for _en in self.entities:
if isinstance(_en, FluidFem):
for _elem in _en.elements:
_, __, p_elem = _elem.display_sol(3)
_max = np.amax(np.abs(p_elem))
_min = np.amin(np.abs(p_elem))
if _max >p_max: p_max = _max
if _min <p_min: p_min = _min
if any(self.plot[3:]):
x, y, u_x, u_y, pr = [], [], [], [], []
for _en in self.entities:
if isinstance(_en, FluidFem):
if any(self.plot[2::3]): # Plot of pressure == True
for ie, _elem in enumerate(_en.elements):
# print(ie/len(_en.elements))
x_elem, y_elem, p_elem = _elem.display_sol(3)
p_elem = p_elem[:, 0]
p_elem *= np.exp(1j*kx*x_elem)
if self.plot[2]:
plt.figure("Pressure")
plt.plot(y_elem, np.abs(p_elem), 'r+')
plt.plot(y_elem, np.imag(p_elem), 'm.')
if self.plot[5]:
triang = mtri.Triangulation(x_elem, y_elem)
plt.figure("Pressure map")
plt.tricontourf(triang, np.abs(p_elem), cmap=cm.jet, levels=np.linspace(p_min, p_max,40))
# x.extend(list(x_elem))
# y.extend(list(y_elem))
# pr.extend(list(p_elem))
elif isinstance(_en, PemFem):
if any(self.plot): # Plot of pressure == True
for _elem in _en.elements:
x_elem, y_elem, f_elem = _elem.display_sol([0, 1, 3])
ux_elem = f_elem[:, 0]*np.exp(1j*kx*x_elem)
uy_elem = f_elem[:, 1]*np.exp(1j*kx*x_elem)
p_elem = f_elem[:, 2]* | np.exp(1j*kx*x_elem) | numpy.exp |
import pytest
import numpy as np
from cascade import kappa
from cascade import group_offsets
from cascade import Cascade
from cascade import ScoreType
from cascade import IndicatorType
import factories
import fixtures
np.random.seed(42)
def test_kappa_with_empy_qid():
qid = np.array([])
cutoff = 5
assert [] == kappa([], cutoff, qid)
def _make_query_data(num_queries=1, depth=5, random_depth=False):
queries = np.random.choice(list(range(num_queries)),
num_queries,
replace=False)
scores = np.random.uniform(low=0., high=10., size=num_queries * depth)
qid = []
for x in queries:
qid += [x] * depth
return scores, np.array(qid)
def test_kappa_when_query_equals_cutoff():
cutoff = 5
query_depth = 5
scores, qid = _make_query_data(depth=query_depth)
topk = sorted(scores, reverse=True)
res = kappa(scores, cutoff, qid)
np.testing.assert_almost_equal(res, np.full_like(res, topk[cutoff - 1]))
def test_kappa_score_when_query_shorter_than_cutoff():
cutoff = 10
query_depth = 5
scores, qid = _make_query_data(depth=query_depth)
topk = sorted(scores, reverse=True)
res = kappa(scores, cutoff, qid)
np.testing.assert_almost_equal(res, np.full_like(res,
topk[query_depth - 1]))
def test_kappa_score_when_query_longer_than_cutoff():
cutoff = 5
query_depth = 10
scores, qid = _make_query_data(depth=query_depth)
topk = sorted(scores, reverse=True)
res = kappa(scores, cutoff, qid)
np.testing.assert_almost_equal(res, np.full_like(res, topk[cutoff - 1]))
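# Editor's note: a minimal pure-NumPy sketch of the behaviour the three tests
# above pin down (per query, broadcast the score at rank min(cutoff, depth)).
# It assumes group_offsets(qid) yields (start, end) pairs per query, as the
# cascade tests below use it; this is an illustration, not the library code.
def _kappa_reference_sketch(scores, cutoff, qid):
    scores = np.asarray(scores, dtype=float)
    out = np.empty_like(scores)
    for a, b in group_offsets(qid):
        topk = sorted(scores[a:b], reverse=True)
        out[a:b] = topk[min(cutoff, b - a) - 1]
    return out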
def test_single_stage_cascade_resets_predict_attributes():
cascade = factories.dummy_cascade()
cascade.predict_reset()
for ranker in cascade:
assert None == ranker.predict
assert None == ranker.kappa
assert None == ranker.mask
assert None == ranker.estimate
def test_cascade_first_stage_has_no_score_mask():
X, _, qid = fixtures.train_data()
cascade = factories.cascade(num_stages=1, cutoffs=[5])
ranker = cascade.rankers[0]
cascade.predict(X, qid)
assert Cascade.SCORE_MASK not in ranker.predict
def test_cascade_first_stage_applies_cutoff():
X, _, qid = fixtures.train_data()
cascade = factories.cascade(num_stages=1, cutoffs=[2])
ranker = cascade.rankers[0]
ranker.booster.update()
offsets = group_offsets(qid)
a, b = next(offsets)
cascade.predict(X, qid)
expected = (b - a) * [0.01948363]
np.testing.assert_almost_equal(ranker.kappa[a:b], expected)
def test_cascade_first_stage_applies_mask():
X, _, qid = fixtures.train_data()
cascade = factories.cascade(num_stages=1, cutoffs=[2])
ranker = cascade.rankers[0]
ranker.booster.update()
offsets = group_offsets(qid)
a, b = next(offsets)
cascade.predict(X, qid)
expected = [0, 0, 1, 1, 0]
np.testing.assert_almost_equal(ranker.mask[a:b], expected)
def test_cascade_second_stage_applies_cutoff():
X, _, qid = fixtures.train_data()
cascade = factories.cascade(num_stages=2, cutoffs=[4, 2])
cascade.update()
ranker_two = cascade.rankers[1]
offsets = group_offsets(qid)
a, b = next(offsets)
cascade.predict(X, qid)
topk = sorted(ranker_two.predict[a:b], reverse=True)
expected = (b - a) * [topk[1]]
np.testing.assert_almost_equal(ranker_two.kappa[a:b], expected)
def test_cascade_second_stage_applies_mask():
X, _, qid = fixtures.train_data()
cascade = factories.cascade(num_stages=2, cutoffs=[4, 2])
cascade.update()
ranker_one = cascade.rankers[0]
ranker_two = cascade.rankers[1]
offsets = group_offsets(qid)
a, b = next(offsets)
cascade.predict(X, qid)
expected = [1, 0, 1, 1, 1]
np.testing.assert_almost_equal(ranker_one.mask[a:b], expected)
expected = [0, 0, 1, 1, 0]
np.testing.assert_almost_equal(ranker_two.mask[a:b], expected)
def test_cascade_score_mask_does_not_appear_in_first_stage():
X, _, qid = fixtures.train_data()
cascade = factories.cascade(num_stages=2, cutoffs=[4, 2])
cascade.update()
ranker_one = cascade.rankers[0]
ranker_two = cascade.rankers[1]
offsets = group_offsets(qid)
a, b = next(offsets)
cascade.predict(X, qid, is_train=True)
assert Cascade.SCORE_MASK not in ranker_one.predict
def test_cascade_uses_score_mask():
"""As per previous implementation, always use the SCORE_MASK during predict
regardless of whether we are doing training or inference.
"""
X, _, qid = fixtures.train_data()
cascade = factories.cascade(num_stages=2, cutoffs=[4, 2])
cascade.update()
ranker_one = cascade.rankers[0]
ranker_two = cascade.rankers[1]
offsets = group_offsets(qid)
a, b = next(offsets)
for is_train in [True, False]:
cascade.predict(X, qid, is_train=is_train)
assert Cascade.SCORE_MASK in ranker_two.predict
def test_cascade_computed_kappa_when_training():
qid = np.array([1, 1, 1, 1, 1])
offsets = group_offsets(qid)
a, b = next(offsets)
cascade = factories.dummy_cascade()
ranker = factories.ranker()
ranker.cutoff = 2
prev_mask = [1, 1, 0, 1, 1]
scores = np.array([0.1, 1.0, -0.03, 0.5, 0.25])
ranker.predict = np.copy(scores)
# according to previous mask
ranker.predict[2] = Cascade.SCORE_MASK
scores = cascade.ranker_apply_cutoff(ranker,
scores,
prev_mask,
qid,
is_train=True)
expected = [0.5] * 5
np.testing.assert_almost_equal(ranker.kappa[a:b], expected)
assert scores is not ranker.predict
def test_cascade_computed_kappa_when_inference():
qid = np.array([1, 1, 1, 1, 1])
offsets = group_offsets(qid)
a, b = next(offsets)
cascade = factories.dummy_cascade()
ranker = factories.ranker()
ranker.cutoff = 2
prev_mask = [1, 1, 0, 1, 1]
# put 10. to test if SCORE_MASK is used in `ranker_apply_cutoff`
scores = np.array([0.1, 1.0, 10., 0.5, 0.25])
ranker.predict = np.copy(scores)
# according to previous mask
ranker.predict[2] = Cascade.SCORE_MASK
scores = cascade.ranker_apply_cutoff(ranker,
scores,
prev_mask,
qid,
is_train=False)
expected = [0.5] * 5
np.testing.assert_almost_equal(ranker.kappa[a:b], expected)
assert scores is ranker.predict
def test_cascade_first_stage_score_any_type():
cascade = factories.cascade(num_stages=1, cutoffs=[4])
for name, member in ScoreType.__members__.items():
if member.name != name: # skip alias names
continue
cascade.set_score_type(name)
ranker_one = cascade.rankers[0]
cascade.ranker_score(ranker_one)
assert ranker_one.predict is ranker_one.estimate
def test_cascade_second_stage_score_independent_type():
cascade = factories.cascade(num_stages=2, cutoffs=[4, 2])
cascade.set_score_type('independent')
ranker_one = cascade.rankers[0]
ranker_one.mask = np.array([1, 1, 1, 1, 0])
ranker_one.estimate = np.array([4., 3., 2., 1., 0.])
ranker_two = cascade.rankers[1]
ranker_two.predict = | np.array([5., 5., 5., 5., 5.]) | numpy.array |
import numpy as np
import numpy.linalg as la
import subprocess as sub
import itertools as itt
import sys
import time
import uuid
from pathlib import Path
from .Fragments import Fragments
from .Potential import *
from .MBE_Potential import MBE_Potential
from .read_geometries import read_geoms
class Constants:
"""Helps me keep track of constants and conversions,
originally written by Mark (b3m2a1)."""
atomic_units = {
"wavenumbers" : 4.55634e-6,
"angstroms" : 1/0.529177,
"amu" : 1.000000000000000000/6.02213670000e23/9.10938970000e-28 #1822.88839 g/mol -> a.u.
}
masses = {
"H" : ( 1.00782503223, "amu"),
"O" : (15.99491561957, "amu"),
"D" : (2.0141017778,"amu"),
"C" : (11.9999999958,"amu"),
"N" : (14.003074,"amu")
}
@classmethod
def convert(cls, val, unit, to_AU = True):
vv = cls.atomic_units[unit]
return (val * vv) if to_AU else (val / vv)
@classmethod
def mass(cls, atom, to_AU = True):
m = cls.masses[atom]
if to_AU:
m = cls.convert(*m)
return m
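# Editor's illustration (not part of the original module): a tiny, never-called
# helper showing how the Constants API above is typically used.
def _constants_demo():
    """Convert an O-H bond length and a hydrogen mass to atomic units."""
    r_au = Constants.convert(0.9578, "angstroms")   # Angstrom -> bohr
    m_h = Constants.mass("H")                       # amu -> a.u. of mass
    return r_au, m_h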
class HarmonicAnalysis:
def __init__(self,eqGeom,atoms,potential,dx=1.0e-3,ofile=None):
self.eqGeom = eqGeom
self.atoms = atoms
self.potential = potential
self.dx = dx
self.ofile=ofile
self.nEls = 3 * len(self.atoms)
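# Editor's background note (illustration, not original code): a standard
# 5-point central-difference stencil with spacing h = self.dx approximates
#   f''(x) ~ (-f(x-2h) + 16 f(x-h) - 30 f(x) + 16 f(x+h) - f(x+2h)) / (12 h^2),
# which is the kind of stencil genStencil below assembles displaced geometries for.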
def genStencil(self,dispTup,dim):
cds = self.eqGeom
dx = self.dx
if dim == 1:
"""Generates 5-point 1D stencil for finite difference"""
atm = dispTup[0] #atom of interest
cd = dispTup[1] #x,y, or z
stnShape = np.concatenate(([5], | np.shape(cds) | numpy.shape |
# -*- coding: ISO-8859-1 -*-
#-----------------------------------------------------------------------------
# Copyright (c) 2014, HFTools Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
u"""
dim
========
.. autoclass:: DimBase
"""
import datetime
import numpy as np
import six
from hftools.utils import is_numlike, is_integer, deprecate
from hftools.py3compat import integer_types
def dims_has_complex(dims):
complex_axes = (ComplexDerivAxis, ComplexIndepAxis, ComplexDiagAxis)
for dim in reversed(dims):
if isinstance(dim, complex_axes):
return True
return False
def info_has_complex(info):
deprecate("info_has_complex is deprecated")
return dims_has_complex(info)
def flatten(sequence):
for item in sequence:
if isinstance(item, (list, tuple)):
for subitem in flatten(item):
yield subitem
else:
yield item
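# Editor's illustration: list(flatten([[1, 2], (3, [4, (5,)])])) -> [1, 2, 3, 4, 5];
# nested lists/tuples are unwrapped lazily, one scalar at a time.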
class DimBase(object):
sortprio = 0
def __init__(self, Name, data=None, unit=None, name=None,
outputformat=None):
if isinstance(Name, DimBase):
dim_data = Name.data
dim_name = Name.name
dim_unit = Name.unit
dim_outputformat = Name.outputformat
else:
dim_data = data
dim_name = Name
dim_unit = unit
dim_outputformat = outputformat
if data is not None:
dim_data = data
if unit is not None:
dim_unit = unit
if name is not None:
dim_name = name
if outputformat is not None:
dim_outputformat = outputformat
if isinstance(dim_data, integer_types):
dim_data = list(range(dim_data))
if hasattr(dim_data, "tolist"):
dim_data = [dim_data.tolist()]
if not isinstance(dim_data, (list, tuple)):
dim_data = list(dim_data)
self._data = tuple(flatten(dim_data))
self._name = dim_name
self._unit = dim_unit
self._outputformat = dim_outputformat
@property
def data(self):
if len(self._data) == 0:
d = | np.array([]) | numpy.array |
import numpy as np
from abc import ABC, abstractmethod
from gym.spaces import Dict
class AbstractEnvRunner(ABC):
def __init__(self, *, env, model, nsteps):
self.env = env
self.model = model
self.nenv = nenv = env.num_envs if hasattr(env, 'num_envs') else 1
if isinstance(env.observation_space, Dict):
if 'depth' in env.observation_space.spaces:
self.batch_ob_shape = (
(nenv*nsteps,) + env.observation_space.spaces['depth'].shape,
(nenv*nsteps,) + env.observation_space.spaces['pointgoal'].shape
)
self.obs = (
np.zeros((nenv,) + env.observation_space.spaces['depth'].shape,
dtype=env.observation_space.spaces['depth'].dtype.name),
np.zeros((nenv,) + env.observation_space.spaces['pointgoal'].shape,
dtype=env.observation_space.spaces['pointgoal'].dtype.name)
)
obs_reset = env.reset()
batch_depth = []
batch_goal = []
for xb in range(len(obs_reset)):
batch_depth.append(obs_reset[xb]['depth'])
batch_goal.append(obs_reset[xb]['pointgoal'])
batch_depth = np.array(batch_depth)
batch_goal = | np.array(batch_goal) | numpy.array |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Created on Sun Dec 30 12:02:12 2018
# ppdire - Projection pursuit dimension reduction
# @author: <NAME> (Ponalytics)
#from .dicomo import dicomo
import numpy as np
from statsmodels.regression.quantile_regression import QuantReg
import statsmodels.robust as srs
import scipy.stats as sps
from scipy.linalg import pinv2
from scipy.optimize import minimize
import copy
from sklearn.utils.metaestimators import _BaseComposition
from sklearn.base import RegressorMixin,BaseEstimator,TransformerMixin, defaultdict
from sklearn.utils.extmath import svd_flip
from ..sprm.rm import rm
from ..preprocessing.robcent import VersatileScaler
import warnings
from ..dicomo.dicomo import dicomo
from ..dicomo._dicomo_utils import *
from .capi import capi
from ._ppdire_utils import *
from ..preprocessing._preproc_utilities import scale_data
from ..utils.utils import MyException, convert_X_input, convert_y_input
import inspect
class ppdire(_BaseComposition,BaseEstimator,TransformerMixin,RegressorMixin):
"""
PPDIRE Projection Pursuit Dimension Reduction
The class allows for calculation of the projection pursuit optimization
either through `scipy.optimize` or through the grid algorithm, native to this
package. The class provides a very flexible way to access optimization of
projection indices that can lead to either classical or robust dimension
reduction. Optimization through scipy.optimize is much more efficient, yet
it will only provide correct results for classical projection indices. The
native grid algorithm should be used when the projection index involves
order statistics of any kind, such as ranks, trimming, winsorizing, or
empirical quantiles. The grid optimization algorithm for projection pursuit implemented here,
was outlined in:
<NAME>., <NAME>., <NAME>. and <NAME>.,
Robust multivariate methods: The projection pursuit approach,
in: From Data and Information Analysis to Knowledge Engineering,
Spiliopoulou, M., <NAME>., <NAME>., <NAME>. and <NAME>., eds.,
Springer Verlag, Berlin, Germany,
2006, pages 270--277.
Parameters
------------
projection_index : function or class.
dicomo and capi supplied in this package can both be used, but user-defined projection indices can be processed as well (e.g., ball covariance).
pi_arguments : dict
arguments to be passed on to projection index
n_components : int
number of components to estimate
trimming : float
trimming percentage to be entered as pct/100
alpha : float.
Continuum coefficient. Only relevant if ppdire is used to estimate (classical or robust) continuum regression
optimizer : str.
Presently: either 'grid' (native optimizer) or any of the options in scipy-optimize (e.g. 'SLSQP')
optimizer_options : dict
with options to pass on to the optimizer
If optimizer == 'grid',
ndir: int: Number of directions to calculate per iteration.
maxiter: int. Maximal number of iterations.
optimizer_constraints : dict or list of dicts,
further constraints to be passed on to the optimizer function.
regopt : str.
regression option for regression step y~T. Can be set to 'OLS' (default), 'robust' (will run sprm.rm) or 'quantile' (statsmodels.regression.quantreg).
center : str,
how to center the data. options accepted are options from sprm.preprocessing
center_data : bool
scale_data : bool.
Note: if set to False, convergence to correct optimum is not a given. Will throw a warning.
whiten_data : bool.
Typically used for ICA (kurtosis as PI)
square_pi : bool.
Whether to square the projection index upon evaluation.
compression : bool.
        Use internal data compression step for flat data.
copy : bool.
Whether to make a deep copy of the input data or not.
verbose : bool.
Set to True prints the iteration number.
return_scaling_object : bool.
If True, the rescaling object will be returned.
Attributes
------------
Attributes always provided
- `x_weights_`: X block PPDIRE weighting vectors (usually denoted W)
- `x_loadings_`: X block PPDIRE loading vectors (usually denoted P)
- `x_scores_`: X block PPDIRE score vectors (usually denoted T)
- `x_ev_`: X block explained variance per component
- `x_Rweights_`: X block SIMPLS style weighting vectors (usually denoted R)
- `x_loc_`: X block location estimate
- `x_sca_`: X block scale estimate
- `crit_values_`: vector of evaluated values for the optimization objective.
- `Maxobjf_`: vector containing the optimized objective per component.
Attributes created when more than one block of data is provided:
- `C_`: vector of inner relationship between response and latent variables block
- `coef_`: vector of regression coefficients, if second data block provided
- `intercept_`: intercept
- `coef_scaled_`: vector of scaled regression coefficients (when scaling option used)
- `intercept_scaled_`: scaled intercept
- `residuals_`: vector of regression residuals
- `y_ev_`: y block explained variance
- `fitted_`: fitted response
- `y_loc_`: y location estimate
- `y_sca_`: y scale estimate
Attributes created only when corresponding input flags are `True`:
- `whitening_`: whitened data matrix (usually denoted K)
- `mixing_`: mixing matrix estimate
- `scaling_object_`: scaling object from `VersatileScaler`
"""
def __init__(self,
projection_index,
pi_arguments = {},
n_components = 1,
trimming = 0,
alpha = 1,
optimizer = 'SLSQP',
optimizer_options = {'maxiter': 100000},
optimizer_constraints = {},
regopt = 'OLS',
center = 'mean',
center_data=True,
scale_data=True,
whiten_data=False,
square_pi = False,
compression = False,
copy=True,
verbose=True,
return_scaling_object=True):
# Called arguments
self.projection_index = projection_index
self.pi_arguments = pi_arguments
self.n_components = n_components
self.trimming = trimming
self.alpha = alpha
self.optimizer = optimizer
self.optimizer_options = optimizer_options
self.optimizer_constraints = optimizer_constraints
self.regopt = regopt
self.center = center
self.center_data = center_data
self.scale_data = scale_data
self.whiten_data = whiten_data
self.square_pi = square_pi
self.compression = compression
self.copy = copy
self.verbose = verbose
self.return_scaling_object = return_scaling_object
# Other global parameters
self.constraint = 'norm'
self.optrange = (-1,1)
self.licenter = ['mean','median']
if not(self.center in self.licenter):
raise(ValueError('Only location estimator classes allowed are: "mean", "median"'))
def fit(self,X,*args,**kwargs):
"""
Fit a projection pursuit dimension reduction model.
Parameters
------------
X : numpy array
Input data.
"""
# Collect optional fit arguments
biascorr = kwargs.pop('biascorr',False)
if 'h' not in kwargs:
h = self.n_components
else:
h = kwargs.pop('h')
self.n_components = h
if 'dmetric' not in kwargs:
dmetric = 'euclidean'
else:
dmetric = kwargs.get('dmetric')
if 'mixing' not in kwargs:
mixing = False
else:
mixing = kwargs.get('mixing')
if 'y' not in kwargs:
na = len(args)
if na > 0: #Use of *args makes it sklearn consistent
flag = 'two-block'
y = args[0]
else:
flag = 'one-block'
                y = 0 # to allow calls with 'y=y' in spite of no real y argument present
else:
flag = 'two-block'
y = kwargs.get('y')
if 'quantile' not in kwargs:
quantile = .5
else:
quantile = kwargs.get('quantile')
if self.regopt == 'robust':
if 'fun' not in kwargs:
fun = 'Hampel'
else:
fun = kwargs.get('fun')
if 'probp1' not in kwargs:
probp1 = 0.95
else:
probp1 = kwargs.get('probp1')
if 'probp2' not in kwargs:
probp2 = 0.975
else:
probp2 = kwargs.get('probp2')
if 'probp3' not in kwargs:
probp3 = 0.99
else:
probp3 = kwargs.get('probp3')
if self.projection_index == dicomo:
if self.pi_arguments['mode'] in ('M3','cos','cok'):
if 'option' not in kwargs:
option = 1
else:
option = kwargs.get('option')
if option > 3:
print('Option value >3 will compute results, but meaning may be questionable')
# Initiate projection index
self.most = self.projection_index(**self.pi_arguments)
# Initiate some parameters and data frames
if self.copy:
X0 = copy.deepcopy(X)
self.X0 = X0
else:
X0 = X
X = convert_X_input(X0)
n,p = X0.shape
trimming = self.trimming
# Check dimensions
if h > min(n,p):
raise(MyException('number of components cannot exceed number of samples'))
if (self.projection_index == dicomo and self.pi_arguments['mode'] == 'kurt' and self.whiten_data==False):
warnings.warn('Whitening step is recommended for ICA')
# Pre-processing adjustment if whitening
if self.whiten_data:
self.center_data = True
self.scale_data = False
self.compression = False
print('All results produced are for whitened data')
# Centring and scaling
if self.scale_data:
if self.center=='mean':
scale = 'std'
elif ((self.center=='median')|(self.center=='l1median')):
scale = 'mad'
else:
scale = 'None'
warnings.warn('Without scaling, convergence to optima is not given')
# Data Compression for flat tables if required
if ((p>n) and self.compression):
V,S,U = np.linalg.svd(X.T,full_matrices=False)
X = np.matmul(U.T,np.diag(S))
n,p = X.shape
if (srs.mad(X)==0).any():
warnings.warn('Due to low scales in data, compression would induce zero scales.'
+ '\n' + 'Proceeding without compression.')
dimensions = False
                if self.copy:
X = copy.deepcopy(X0)
else:
X = X0
else:
dimensions = True
else:
dimensions = False
# Initiate centring object and scale X data
centring = VersatileScaler(center=self.center,scale=scale,trimming=trimming)
if self.center_data:
Xs = centring.fit_transform(X)
mX = centring.col_loc_
sX = centring.col_sca_
else:
Xs = X
mX = np.zeros((1,p))
sX = np.ones((1,p))
fit_arguments = {}
# Data whitening (best practice for ICA)
if self.whiten_data:
V,S,U = np.linalg.svd(Xs.T,full_matrices=False)
del U
K = (V/S)[:,:p]
del V,S
Xs = np.matmul(Xs, K)
Xs *= np.sqrt(p)
# Presently, X and y need to be matrices
# Will be changed to use regular np.ndarray
Xs = np.matrix(Xs)
# Pre-process y data when available
if flag != 'one-block':
ny = y.shape[0]
y = convert_y_input(y)
if len(y.shape) < 2:
y = np.matrix(y).reshape((ny,1))
# py = y.shape[1]
if ny != n:
raise(MyException('X and y number of rows must agree'))
if self.copy:
y0 = copy.deepcopy(y)
self.y0 = y0
if self.center_data:
ys = centring.fit_transform(y)
my = centring.col_loc_
sy = centring.col_sca_
else:
ys = y
my = 0
sy = 1
ys = np.matrix(ys).astype('float64')
else:
ys = None
# Initializing output matrices
W = np.zeros((p,h))
T = np.zeros((n,h))
P = np.zeros((p,h))
B = np.zeros((p,h))
R = np.zeros((p,h))
B_scaled = np.zeros((p,h))
C = np.zeros((h,1))
Xev = np.zeros((h,1))
assovec = np.zeros((h,1))
Maxobjf = np.zeros((h,1))
# Initialize deflation matrices
E = copy.deepcopy(Xs)
f = ys
bi = np.zeros((p,1))
opt_args = {
'alpha': self.alpha,
'trimming': self.trimming,
'biascorr': biascorr,
'dmetric' : 'euclidean',
}
if self.optimizer=='grid':
# Define grid optimization ranges
if 'ndir' not in self.optimizer_options:
self.optimizer_options['ndir'] = 1000
optrange = np.sign(self.optrange)
optmax = self.optrange[1]
stop0s = np.arcsin(optrange[0])
stop1s = np.arcsin(optrange[1])
stop1c = np.arccos(optrange[0])
stop0c = np.arccos(optrange[1])
anglestart = max(stop0c,stop0s)
anglestop = max(stop1c,stop1s)
nangle = np.linspace(anglestart,anglestop,self.optimizer_options['ndir'],endpoint=False)
alphamat = np.matrix([np.cos(nangle), np.sin(nangle)])
opt_args['_stop0c'] = stop0c
opt_args['_stop0s'] = stop0s
opt_args['_stop1c'] = stop1c
opt_args['_stop1s'] = stop1s
opt_args['optmax'] = optmax
opt_args['optrange'] = self.optrange
opt_args['square_pi'] = self.square_pi
if optmax != 1:
alphamat *= optmax
if p>2:
anglestart = min(opt_args['_stop0c'],opt_args['_stop0s'])
anglestop = min(opt_args['_stop1c'],opt_args['_stop1s'])
nangle = np.linspace(anglestart,anglestop,self.optimizer_options['ndir'],endpoint=True)
alphamat2 = np.matrix([np.cos(nangle), np.sin(nangle)])
if optmax != 1:
alphamat2 *= opt_args['optmax']
# Arguments for grid plane
opt_args['alphamat'] = alphamat,
opt_args['ndir'] = self.optimizer_options['ndir'],
opt_args['maxiter'] = self.optimizer_options['maxiter']
            if type(opt_args['ndir']) is tuple:
opt_args['ndir'] = opt_args['ndir'][0]
# Arguments for grid plane #2
grid_args_2 = {
'alpha': self.alpha,
'alphamat': alphamat2,
'ndir': self.optimizer_options['ndir'],
'trimming': self.trimming,
'biascorr': biascorr,
'dmetric' : 'euclidean',
'_stop0c' : stop0c,
'_stop0s' : stop0s,
'_stop1c' : stop1c,
'_stop1s' : stop1s,
'optmax' : optmax,
'optrange' : self.optrange,
'square_pi' : self.square_pi
}
if flag=='two-block':
grid_args_2['y'] = f
if flag=='two-block':
opt_args['y'] = f
        # Iterative coefficient estimation
for i in range(0,h):
if self.optimizer=='grid':
if p==2:
wi,maximo = gridplane(E,self.most,
pi_arguments=opt_args
)
elif p>2:
afin = np.zeros((p,1)) # final parameters for linear combinations
Z = copy.deepcopy(E)
# sort variables according to criterion
meas = [self.most.fit(E[:,k],
**opt_args)
for k in np.arange(0,p)]
if self.square_pi:
meas = np.square(meas)
wi,maximo = gridplane(Z[:,0:2],self.most,opt_args)
Zopt = Z[:,0:2]*wi
afin[0:2]=wi
for j in np.arange(2,p):
projmat = np.matrix([np.array(Zopt[:,0]).reshape(-1),
np.array(Z[:,j]).reshape(-1)]).T
wi,maximo = gridplane(projmat,self.most,
opt_args
)
Zopt = Zopt*float(wi[0]) + Z[:,j]*float(wi[1])
afin[0:(j+1)] = afin[0:(j+1)]*float(wi[0])
afin[j] = float(wi[1])
tj = Z*afin
objf = self.most.fit(tj,
**{**fit_arguments,**opt_args}
)
if self.square_pi:
objf *= objf
# outer loop to run until convergence
objfold = copy.deepcopy(objf)
objf = -1000
afinbest = afin
ii = 0
maxiter_2j = 2**round(np.log2(self.optimizer_options['maxiter']))
while ((ii < self.optimizer_options['maxiter'] + 1) and (abs(objfold - objf)/abs(objf) > 1e-4)):
for j in np.arange(0,p):
projmat = np.matrix([np.array(Zopt[:,0]).reshape(-1),
np.array(Z[:,j]).reshape(-1)]).T
if j > 16:
divv = maxiter_2j
else:
divv = min(2**j,maxiter_2j)
wi,maximo = gridplane_2(projmat,
self.most,
q=afin[j],
div=divv,
pi_arguments=grid_args_2
)
Zopt = Zopt*float(wi[0,0]) + Z[:,j]*float(wi[1,0])
afin *= float(wi[0,0])
afin[j] += float(wi[1,0])
# % evaluate the objective function:
tj = Z*afin
objfold = copy.deepcopy(objf)
objf = self.most.fit(tj,
q=afin,
**opt_args
)
if self.square_pi:
objf *= objf
if objf!=objfold:
if self.constraint == 'norm':
afinbest = afin/np.sqrt(np.sum(np.square(afin)))
else:
afinbest = afin
ii +=1
if self.verbose:
print(str(ii))
#endwhile
afinbest = afin
wi = np.zeros((p,1))
wi = afinbest
Maxobjf[i] = objf
# endif;%if p>2;
else: # do not optimize by the grid algorithm
if self.trimming > 0:
warnings.warn('Optimization that involves a trimmed objective is not a quadratic program. The scipy-optimize result will be off!!')
if 'center' in self.pi_arguments:
if (self.pi_arguments['center']=='median'):
warnings.warn('Optimization that involves a median in the objective is not a quadratic program. The scipy-optimize result will be off!!')
constraint = {'type':'eq',
'fun': lambda x: np.linalg.norm(x) -1,
}
if len(self.optimizer_constraints)>0:
constraint = [constraint,self.optimizer_constraints]
wi = minimize(pp_objective,
E[0,:].transpose(),
args=(self.most,E,opt_args),
method=self.optimizer,
constraints=constraint,
options=self.optimizer_options).x
wi = np.matrix(wi).reshape((p,1))
wi /= np.sqrt(np.sum(np.square(wi)))
# Computing projection weights and scores
ti = E*wi
if self.optimizer != 'grid':
Maxobjf[i] = self.most.fit(E*wi,**opt_args)
nti = np.linalg.norm(ti)
pi = E.T*ti / (nti**2)
if self.whiten_data:
wi /= np.sqrt((wi**2).sum())
wi = K*wi
wi0 = wi
wi = np.array(wi)
if len(W[:,i].shape) == 1:
wi = wi.reshape(-1)
W[:,i] = wi
T[:,i] = np.array(ti).reshape(-1)
P[:,i] = np.array(pi).reshape(-1)
if flag != 'one-block':
criteval = self.most.fit(E*wi0,
**opt_args
)
if self.square_pi:
criteval *= criteval
assovec[i] = criteval
# Deflation of the datamatrix guaranteeing orthogonality restrictions
E -= ti*pi.T
# Calculate R-Weights
R = np.dot(W[:,0:(i+1)],pinv2(np.dot(P[:,0:(i+1)].T,W[:,0:(i+1)]),check_finite=False))
# Execute regression y~T if y is present. Generate regression estimates.
if flag != 'one-block':
if self.regopt=='OLS':
ci = np.dot(ti.T,ys)/(nti**2)
elif self.regopt == 'robust':
linfit = rm(fun=fun,probp1=probp1,probp2=probp2,probp3=probp3,
centre=self.center,scale=scale,
start_cutoff_mode='specific',verbose=self.verbose)
linfit.fit(ti,ys)
ci = linfit.coef_
elif self.regopt == 'quantile':
linfit = QuantReg(y,ti)
model = linfit.fit(q=quantile)
ci = model.params
# end regression if
C[i] = ci
bi = np.dot(R,C[0:(i+1)])
bi_scaled = bi
bi = np.multiply(np.reshape(sy/sX,(p,1)),bi)
B[:,i] = bi[:,0]
B_scaled[:,i] = bi_scaled[:,0]
# endfor; Loop for latent dimensions
# Re-adjust estimates to original dimensions if data have been compressed
if dimensions:
B = np.matmul(V[:,0:p],B)
B_scaled = np.matmul(V[:,0:p],B_scaled)
R = np.matmul(V[:,0:p],R)
W = np.matmul(V[:,0:p],W)
P = np.matmul(V[:,0:p],P)
bi = B[:,h-1]
if self.center_data:
Xs = centring.fit_transform(X0)
mX = centring.col_loc_
sX = centring.col_sca_
else:
Xs = X0
mX = np.zeros((1,p))
sX = np.ones((1,p))
bi = bi.astype("float64")
if flag != 'one-block':
# Calculate scaled and unscaled intercepts
if dimensions:
X = convert_X_input(X0)
if(self.center == "mean"):
intercept = sps.trim_mean(y - np.matmul(X,bi),trimming)
else:
intercept = np.median(np.reshape(y - np.matmul(X,bi),(-1)))
yfit = np.matmul(X,bi) + intercept
if not(scale == 'None'):
if (self.center == "mean"):
b0 = np.mean(ys - np.matmul(Xs.astype("float64"),bi))
else:
b0 = np.median(np.array(ys.astype("float64") - np.matmul(Xs.astype("float64"),bi)))
else:
b0 = intercept
# Calculate fit values and residuals
r = y - yfit
setattr(self,"coef_",B)
setattr(self,"intercept_",intercept)
setattr(self,"coef_scaled_",B_scaled)
setattr(self,"intercept_scaled_",b0)
setattr(self,"residuals_",r)
setattr(self,"fitted_",yfit)
setattr(self,"y_loadings_",C)
setattr(self,"y_loc_",my)
setattr(self,"y_sca_",sy)
setattr(self,"x_weights_",W)
setattr(self,"x_loadings_",P)
setattr(self,"x_rotations_",R)
setattr(self,"x_scores_",T)
setattr(self,"x_ev_",Xev)
setattr(self,"crit_values_",assovec)
setattr(self,"Maxobjf_",Maxobjf)
if self.whiten_data:
setattr(self,"whitening_",K)
if mixing:
setattr(self,"mixing_", | np.linalg.pinv(W) | numpy.linalg.pinv |
from __future__ import division
import numpy as np
from numpy import log10
from scipy.interpolate import PchipInterpolator as interp1d
def seismic(f, ifo):
"""Seismic noise.
"""
return seismicAll(f, ifo)[0]
def seismicAll(f, ifo):
"""Seismic noise.
Return (noise, noise_vertical, noise_horizontal)
"""
hTable = ifo.Suspension.hTable
vTable = ifo.Suspension.vTable
theta = ifo.Suspension.VHCoupling.theta
# noise input, horizontal and vertical
if 'PlatformMotion' in ifo.Seismic:
if ifo.Seismic.PlatformMotion == 'BSC':
nt, nr = seisBSC(f)
elif ifo.Seismic.PlatformMotion == '6D':
nt, nr = seis6D(f)
else:
nt, nr = seisBSC(f)
else:
nt, nr = seisBSC(f)
# horizontal noise total
nh = (abs(hTable)**2) * nt**2
# vertical noise total
nv = (abs(theta * vTable)**2) * nt**2
# new total noise
n = nv + nh
# Convert into Strain PSD (4 TMs)
nh *= 4 * ifo.gwinc.dhdl_sqr
nv *= 4 * ifo.gwinc.dhdl_sqr
n *= 4 * ifo.gwinc.dhdl_sqr
return n, nh, nv
def seisBSC(f):
"""Rough ISI noise source spectra.
Returns ISI (translational, rotational) DOFs
"""
SEI_F = np.array([0.01, 0.03, 0.1, 0.2, 0.5, 1, 10, 30, 300])
# translational DOFs
    SEI_T = np.array([3e-6, 1e-6, 2e-7, 2e-7, 8e-10, 1e-11, 3e-13, 3e-14, 3e-14])
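    nt = 10**(interp1d(SEI_F, log10(SEI_T))(f))
    # rotational DOFs -- the original function is truncated here; the spectrum below is a
    # best-effort reconstruction (assumed representative values), mirroring the SEI_T pattern
    SEI_R = np.array([1e-8, 3e-8, 2e-8, 1e-8, 4e-9, 1e-9, 3e-12, 1e-13, 1e-13])
    nr = 10**(interp1d(SEI_F, log10(SEI_R))(f))
    return nt, nr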
import numpy as np
from numpy import *
from astropy import units as u
from scipy.integrate import quad
import math as math
from math import sqrt, pi, sin, cos
import matplotlib.pyplot as plt
import scipy.optimize as opt
from matplotlib import rcParams as rcp
from matplotlib import colors
from matplotlib import rc
plt.rcParams['figure.figsize'] = [9, 6]
plt.rcParams['figure.dpi'] = 100
plt.rcParams['xtick.labelsize'] = 10
plt.rcParams['ytick.labelsize'] = 10
rcp['axes.formatter.useoffset'] = False
rcp['axes.linewidth'] = 1.5
rcp['axes.axisbelow'] = False
rcp['xtick.major.size'] = 8
rcp['xtick.minor.size'] = 4
rcp['xtick.labelsize'] = 15
rcp['legend.fontsize'] = 15
rcp['xtick.direction'] = 'in'
rcp['ytick.major.width'] = 2
rcp['ytick.minor.width'] = 2
rcp['savefig.dpi'] = 300
rcp["figure.dpi"] = 100
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.coordinates.angles import Angle
import sympy as sp
from astropy.constants import c
c_km = (c.to('km/s').value)
######## PANTHEON data ######################
# import redshifts (cosmological + heliocentric), apparent magnitudes, error of app.magnitudes, systematic errors
# Get the data from the github repository Pantheon of Dan Scolnic #
# https://github.com/dscolnic/Pantheon/blob/master/lcparam_full_long_zhel.txt #
data = np.loadtxt('/home/kerky/anaconda3/SN1A_DATA/PANTHEON_DATA/Scolnic_data_updated.txt', usecols=[1,2,4,5])
### list with all the systematics as found in the PANTHEON catalog ###
# get the full systematics from the same repository #
#https://github.com/dscolnic/Pantheon/blob/master/sys_full_long.txt #
sys = np.genfromtxt('/home/kerky/anaconda3/SN1A_DATA/PANTHEON_DATA/systematics.txt', skip_header=1)
### The list sn_names contains all the supernovae names in the PANTHEON catalog ###
sn_names = np.genfromtxt('/home/kerky/anaconda3/SN1A_DATA/PANTHEON_DATA/Scolnic_data_updated.txt', usecols=[0],dtype='str')
z_cmb= np.array((data[:,0])) ## CMB redshift
z_hel = np.array(np.array(data[:,1])) ## heliocentric redshift
mb = np.array(data[:,2]) ## apparent magnitude
### We select the C11 Scattering Model.
names = np.genfromtxt('/home/kerky/anaconda3/SN1A_DATA/PANTHEON_DATA/Ancillary_C11.FITRES.txt',dtype='str', skip_header=67, usecols=[1])
########## SEPARATE THE Pantheon sample into 13 subsamples based on idsurvey ##########
idsurvey1 = np.genfromtxt('/home/kerky/anaconda3/SN1A_DATA/PANTHEON_DATA/Ancillary_C11.FITRES.txt', skip_header=67, usecols=[3], dtype="str").astype(float)
### The idsurvey1= 61, 62, 63, 64, 65, 66 constitute the CfA
### The idsurvey1=61 constitutes the CfA1
### The idsurvey1=62 constitutes the CfA2
### The idsurvey1=65, 66 constitute the CfA4
### The idsurvey1=63, 64 constitute the CfA3
### The idsurvey1=15 constitutes the PS1
### The idsurvey1=4 constitutes the SNLS
### The idsurvey1=100, 101, 106 constitute the HST
### The idsurvey1=1 constitute the SDSS
### The idsurvey1=5 constitutes the CSP
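# For reference, a mask for one of these subsamples can be built directly from the ID lists
# above (illustration only), e.g. for the combined CfA surveys:
#   mask_cfa = np.isin(idsurvey1, [61, 62, 63, 64, 65, 66])
#   xx_cfa = z_cmb[mask_cfa]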
xx_high = z_cmb[(idsurvey1!=15) & (idsurvey1!=1) & (idsurvey1!=4) & (idsurvey1!=5) &
(idsurvey1!=61) & (idsurvey1!=62) & (idsurvey1!= 63) &(idsurvey1!=64) & (idsurvey1!=65) &
(idsurvey1!=66)]
print(len(xx_high))
print(np.min(xx_high))
print(np.max(xx_high))
print(np.median(xx_high))
xx_low = z_cmb[(idsurvey1!=15) & (idsurvey1!=1) & (idsurvey1!=4) & (idsurvey1!=5) & (z_cmb<0.7)]
print(len(xx_low))
print(np.min(xx_low))
print(np.max(xx_low))
print(np.median(xx_low))
xx_SDSS = z_cmb[idsurvey1==1]
print(len(xx_SDSS))
print(np.min(xx_SDSS))
print(np.max(xx_SDSS))
print(np.median(xx_SDSS))
xx_SNLS = z_cmb[idsurvey1==4]
print(len(xx_SNLS))
print(np.min(xx_SNLS))
print(np.max(xx_SNLS))
print(np.median(xx_SNLS))
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 24 14:09:56 2019
@author: s146959
"""
# ========================================================================== #
# ========================================================================== #
from __future__ import absolute_import, with_statement, absolute_import, \
division, print_function, unicode_literals
# ========================================================================== #
# ========================================================================== #
import numpy as _np
import os as _os
import matplotlib.pyplot as _plt
#import scipy.signal.correlate as xcorr
from FFT.fft_analysis import fftanal, ccf
from pybaseutils.plt_utils import savefig
from FFT.windows import _crosscorr
def chunk(x, n):
    '''Split the list x into n chunks'''
L = len(x)
assert 0 < n <= L
s = L//n
return [x[p:p+s] for p in range(0, L, s)]
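# e.g. chunk(list(range(10)), 5) -> [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]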
datafolder = _os.path.abspath(_os.path.join('..','..','..','..', 'Workshop'))
#datafolder = _os.path.join('/homea','weir','bin')
print(datafolder)
#
#tt=_np.linspace(0,2.0*_np.pi,401)
#tt2=_np.linspace(0,4.0*_np.pi,801)
#sin1=_np.sin(tt)
#sin2=_np.sin(tt)
#
##sin1=_np.array([0,0.5,1,2,3,3,4,2,1,0.5,0])
##sin2=_np.array([0,0.5,1,2,3,3,4,2,1,0.5,0])
##tt=range(len(sin1))
#
#x=_crosscorr(sin1,sin2)/_np.sum(sin1**2)
#[t,y]=ccf(sin1,sin2,1/((tt[-1]-tt[0])/(len(sin1)-1)))
#fig0=_plt.figure()
#_plt.plot(sin1)
#_plt.plot(sin2)
#fig=_plt.figure()
#_plt.plot(y)
#fig.suptitle('ccf method on sines')
#fig1=_plt.figure()
#_plt.plot(x)
#fig1.suptitle('crosscorr method on sines')
cmPerGHz = 1
for nwindows in [1,10,100,1000,10000]:
# nwindows = 100
overlap=0.0
sintest=True
scantitl = 'CECE_jan17_fix4'
# scantitl += '_50to400'
#scantitl += '_400to500'
freq_ref = 60.0 # [GHz]
fils = ['CECE.69769','CECE.69770','CECE.69771','CECE.69772','CECE.69773','CECE.69777']
freqs = [13.075, 13.075, 13.085, 13.095, 13.105, 13.08]
freqs = [4.0*freq+8.0 for freq in freqs]
#fils = ['CECE.65642','CECE.65643','CECE.65644','CECE.65645','CECE.65646','CECE.65647']
#freqs = [68.0, 68.3, 68.2, 68.1, 68.15, 68.05]
#
#fils.extend(['CECE.65648','CECE.65649','CECE.65650','CECE.65651','CECE.65652'])
#freqs.extend([67.95, 68.25, 68.125, 67.90, 67.80])
intb = [15e3, 500e3] # original
#intb = [50e3, 400e3] # broadband fluctuations
# intb = [400e3, 465e3] # high frequency mode
tb=[0.29,0.31]
#tb=[0.3,0.39]
#tb = [0.192, 0.370]
nfils = len(fils)
#for ii in range(1):
#
# filn = _os.path.abspath(_os.path.join(datafolder, fils[ii]))
# print(filn)
# tt, tmpRF, tmpIF = \
# _np.loadtxt(filn, dtype=_np.float64, unpack=True, usecols=(0,1,2))
# tt = 1e-3*tt
# tt_tb=[_np.where(tt==tb[0])[0][0],_np.where(tt==tb[1])[0][0]]
# [tau,co]=ccf(tmpRF,tmpIF,(len(tt[tt_tb[0]:tt_tb[1]])-1)/(tt[tt_tb[1]]-tt[tt_tb[0]]))
# fig=_plt.figure()
# sub1=_plt.subplot(3,1,1)
# sub2=_plt.subplot(3,1,2,sharex=sub1)
# sub3=_plt.subplot(3,1,3)
#
# sub1.plot(tt[tt_tb[0]:tt_tb[1]],tmpRF[tt_tb[0]:tt_tb[1]])
# sub2.plot(tt[tt_tb[0]:tt_tb[1]],tmpIF[tt_tb[0]:tt_tb[1]])
# sub3.plot(tau,co)
for ii in range(1):
if not sintest:
filn = _os.path.abspath(_os.path.join(datafolder, fils[ii]))
print(filn)
tt, tmpRF, tmpIF = \
_np.loadtxt(filn, dtype=_np.float64, unpack=True, usecols=(0,1,2))
tt = 1e-3*tt
tt_tb=[_np.where(tt<=tb[0])[0][0],_np.where(tt>=tb[1])[0][0]]
tt_used=tt[tt_tb[0]:tt_tb[1]]
RF_used=tmpRF[tt_tb[0]:tt_tb[1]]
IF_used=tmpIF[tt_tb[0]:tt_tb[1]]
if sintest:
tb=[0,2.0]
df=15.50e3
_np.random.seed()
n_s=5000001
tt=_np.linspace(0,2.0*_np.pi,n_s)
fs=1/(((tt[len(tt)-1]-tt[0])/len(tt)))
            RF_used=0.05*_np.sin(2.0*_np.pi*(df)*tt)
__copyright__ = """
Copyright (C) 2020 University of Illinois Board of Trustees
Copyright (C) 2021 <NAME>
"""
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
import cantera as ct
import numpy as np # noqa: F401
import pyrometheus as pyro
import pytest
try:
import jax
except ImportError:
numpy_list = [np]
jnp = None
else:
import jax.numpy as jnp # noqa: F401
jax.config.update("jax_enable_x64", 1)
numpy_list = [np, jnp]
def make_jax_pyro_class(ptk_base_cls, usr_np):
if usr_np != jnp:
return ptk_base_cls(usr_np)
class PyroJaxNumpy(ptk_base_cls):
def _pyro_make_array(self, res_list):
"""This works around (e.g.) numpy.exp not working with object arrays of numpy
scalars. It defaults to making object arrays, however if an array
consists of all scalars, it makes a "plain old" :class:`numpy.ndarray`.
See ``this numpy bug <https://github.com/numpy/numpy/issues/18004>`__
for more context.
"""
from numbers import Number
# Needed to play nicely with Jax, which frequently creates
# arrays of shape () when handed numbers
all_numbers = all(
isinstance(e, Number)
or (isinstance(e, self.usr_np.ndarray) and e.shape == ())
for e in res_list)
if all_numbers:
return self.usr_np.array(res_list, dtype=self.usr_np.float64)
result = self.usr_np.empty_like(res_list, dtype=object,
shape=(len(res_list),))
# 'result[:] = res_list' may look tempting, however:
# https://github.com/numpy/numpy/issues/16564
for idx in range(len(res_list)):
result[idx] = res_list[idx]
return result
def _pyro_norm(self, argument, normord):
"""This works around numpy.linalg norm not working with scalars.
If the argument is a regular ole number, it uses :func:`numpy.abs`,
otherwise it uses ``usr_np.linalg.norm``.
"""
# Wrap norm for scalars
from numbers import Number
if isinstance(argument, Number):
return self.usr_np.abs(argument)
# Needed to play nicely with Jax, which frequently creates
# arrays of shape () when handed numbers
if isinstance(argument, self.usr_np.ndarray) and argument.shape == ():
return self.usr_np.abs(argument)
return self.usr_np.linalg.norm(argument, normord)
return PyroJaxNumpy(usr_np=usr_np)
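# A short usage sketch (this mirrors how the tests below construct their objects):
#   sol = ct.Solution("mechs/sanDiego.cti", "gas")
#   ptk = make_jax_pyro_class(pyro.get_thermochem_class(sol), jnp)   # jnp requires jax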
# Write out all the mechanisms for inspection
@pytest.mark.parametrize("mechname", ["uiuc", "sanDiego"])
def test_generate_mechfile(mechname):
"""This "test" produces the mechanism codes."""
sol = ct.Solution(f"mechs/{mechname}.cti", "gas")
with open(f"mechs/{mechname}.py", "w") as mech_file:
code = pyro.gen_thermochem_code(sol)
print(code, file=mech_file)
@pytest.mark.parametrize("mechname", ["uiuc", "sanDiego"])
@pytest.mark.parametrize("usr_np", numpy_list)
def test_get_rate_coefficients(mechname, usr_np):
"""This function tests that pyrometheus-generated code
computes the rate coefficients matching Cantera
for given temperature and composition"""
sol = ct.Solution(f"mechs/{mechname}.cti", "gas")
ptk_base_cls = pyro.get_thermochem_class(sol)
ptk = make_jax_pyro_class(ptk_base_cls, usr_np)
# Test temperatures
temp = np.linspace(500.0, 3000.0, 10)
for t in temp:
# Set new temperature in Cantera
sol.TP = t, ct.one_atm
# Concentrations
y = sol.Y
rho = sol.density
c = ptk.get_concentrations(rho, y)
# Get rate coefficients and compare
k_ct = sol.forward_rate_constants
k_pm = ptk.get_fwd_rate_coefficients(t, c)
print(k_ct)
print(np.abs((k_ct-k_pm)/k_ct))
assert np.linalg.norm((k_ct-k_pm)/k_ct, np.inf) < 1e-14
return
@pytest.mark.parametrize("mechname", ["uiuc", "sanDiego"])
@pytest.mark.parametrize("usr_np", numpy_list)
def test_get_pressure(mechname, usr_np):
"""This function tests that pyrometheus-generated code
computes the Cantera-predicted pressure for given density,
temperature, and mass fractions
"""
# Create Cantera and pyrometheus objects
sol = ct.Solution(f"mechs/{mechname}.cti", "gas")
ptk_base_cls = pyro.get_thermochem_class(sol)
ptk = make_jax_pyro_class(ptk_base_cls, usr_np)
# Temperature, equivalence ratio, oxidizer ratio, stoichiometry ratio
t = 300.0
phi = 2.0
alpha = 0.21
nu = 0.5
# Species mass fractions
i_fu = ptk.species_index("H2")
i_ox = ptk.species_index("O2")
i_di = ptk.species_index("N2")
x = np.zeros(ptk.num_species)
x[i_fu] = (alpha * phi) / (nu + alpha * phi)
x[i_ox] = nu * x[i_fu] / phi
x[i_di] = (1.0 - alpha) * x[i_ox] / alpha
# Get equilibrium composition
sol.TPX = t, ct.one_atm, x
sol.equilibrate("UV")
t, rho, y = sol.TDY
p_ct = sol.P
# Compute pressure with pyrometheus and compare to Cantera
p_pm = ptk.get_pressure(rho, t, y)
assert abs(p_ct - p_pm) / p_ct < 1.0e-12
@pytest.mark.parametrize("mechname", ["uiuc", "sanDiego"])
@pytest.mark.parametrize("usr_np", numpy_list)
def test_get_thermo_properties(mechname, usr_np):
"""This function tests that pyrometheus-generated code
computes thermodynamic properties c_p, s_r, h_rt, and k_eq
correctly by comparing against Cantera"""
# Create Cantera and pyrometheus objects
sol = ct.Solution(f"mechs/{mechname}.cti", "gas")
ptk_base_cls = pyro.get_thermochem_class(sol)
ptk = make_jax_pyro_class(ptk_base_cls, usr_np)
# Loop over temperatures
temp = np.linspace(500.0, 3000.0, 10)
for t in temp:
# Set state in cantera for comparison
sol.TP = t, ct.one_atm
# Get properties from pyrometheus and compare to Cantera
cp_pm = ptk.get_species_specific_heats_r(t)
        cp_err = np.linalg.norm(cp_pm - sol.standard_cp_R, np.inf)
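        # The source is truncated here; a natural continuation asserts agreement with
        # Cantera (the tolerance below is an assumed value, not taken from the original).
        assert cp_err < 1.0e-13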
#!/usr/bin/env python
import rospy
import tf
import math
from nav_msgs.srv import GetMap
from sensor_msgs.msg import LaserScan
from nav_msgs.msg import Odometry
from visualization_msgs.msg import MarkerArray,Marker
from nav_msgs.msg import OccupancyGrid
import numpy as np
from mapping import Mapping
import sys
MAX_LASER_RANGE = 30
STATE_SIZE = 3
class SLAM_ICP():
def __init__(self):
# ros param
self.robot_x = rospy.get_param('/slam/robot_x',0)
self.robot_y = rospy.get_param('/slam/robot_y',0)
self.robot_theta = rospy.get_param('/slam/robot_theta',0)
## ros param of mapping
self.map_x_width = rospy.get_param('/slam/map_width')
self.map_y_width = rospy.get_param('/slam/map_height')
self.map_reso = rospy.get_param('/slam/map_resolution')
self.map_cellx_width = int(round(self.map_x_width/self.map_reso))
self.map_celly_width = int(round(self.map_y_width/self.map_reso))
# odom robot init states
# self.sensor_sta = [self.robot_x,self.robot_y,self.robot_theta]
self.isFirstScan = True
self.src_pc = []
self.tar_pc = []
self.mapping = Mapping(self.map_cellx_width,self.map_celly_width,self.map_reso)
# State [cos(yaw) -sin(yaw) x]
# [sin(yaw) cos(yaw) y]
# [0 0 1]
self.xEst = np.eye(STATE_SIZE)
self.xOdom = np.zeros((STATE_SIZE, 1))
# map observation
self.obstacle = []
# radius
self.obstacle_r = 10
# ros topic
self.laser_sub = rospy.Subscriber('/scan',LaserScan,self.laserCallback)
self.odom_pub = rospy.Publisher('icp_odom',Odometry,queue_size=3)
self.location_pub = rospy.Publisher('icp_location',Odometry,queue_size=3)
self.odom_broadcaster = tf.TransformBroadcaster()
self.map_pub = rospy.Publisher('/slam_map',OccupancyGrid,queue_size=1)
self.tf = tf.TransformListener()
def laserCallback(self, msg):
np_msg = self.laserToNumpy(msg)
        # The robot pose in the world frame is obtained from the TF tree in self.get_trans().
self.xEst = self.get_trans()
#transform the laser points from the laser frame into the world frame
obs = self.xEst.dot(np_msg)
pmap = self.mapping.update(obs[0], obs[1], self.xEst[0,2], self.xEst[1,2])
# print(pmap)
self.publishMap(pmap)
self.publishResult()
pass
def get_trans(self):
self.tf.waitForTransform("/laser", "/map", rospy.Time(),
rospy.Duration(4.0))
l2m, rot = self.tf.lookupTransform('/laser', '/map', rospy.Time(0))
euler = tf.transformations.euler_from_quaternion(rot)
roll, pitch, yaw_l2m = euler[0], euler[1], euler[2]
self.tf.waitForTransform("/map", "/world_base", rospy.Time(),
rospy.Duration(4.0))
m2w, rot = self.tf.lookupTransform('/map', '/world_base', rospy.Time(0))
euler = tf.transformations.euler_from_quaternion(rot)
roll, pitch, yaw_m2w = euler[0], euler[1], euler[2]
dx = -l2m[0] * math.cos(yaw_l2m) - l2m[1] * math.sin(yaw_l2m) - m2w[0]
dy = l2m[0] * math.sin(yaw_l2m) - l2m[1] * math.cos(yaw_l2m) - m2w[1]
dyaw = -yaw_l2m - yaw_m2w
# State [cos(yaw) -sin(yaw) x]
# [sin(yaw) cos(yaw) y]
# [0 0 1]
xEst = np.array([
[math.cos(dyaw), -math.sin(dyaw), dx * math.cos(yaw_m2w) + dy * math.sin(yaw_m2w)],
[math.sin(dyaw), math.cos(dyaw), -dx * math.sin(yaw_m2w) + dy * math.cos(yaw_m2w)],
[0, 0, 1]
])
return xEst
def laserToNumpy(self, msg):
total_num = len(msg.ranges)
pc = np.ones([3,total_num])
range_l = np.array(msg.ranges)
range_l[range_l == np.inf] = MAX_LASER_RANGE
angle_l = np.linspace(msg.angle_min,msg.angle_max,total_num)
        pc[0:2,:] = np.vstack((np.multiply(np.cos(angle_l),range_l),np.multiply(np.sin(angle_l),range_l)))
        return pc
# -*- coding: utf-8 -*-
"""
Created on Mon May 6 22:58:44 2013
@author: ludo
"""
import os
import math
import numpy
import scipy
import scipy.spatial.distance
import csv
import json
from time import time
from PIL import Image
import matplotlib
from matplotlib import pylab
from matplotlib import pyplot
from matplotlib.patches import Circle
from matplotlib import pyplot as plt
from cellpack.autopack.upy import colors as col
import cellpack.autopack as autopack
from cellpack.autopack.transformation import signed_angle_between_vectors
from cellpack.autopack.ldSequence import halton
from cellpack.autopack.GeometryTools import GeometryTools, Rectangle
from cellpack.autopack.upy.colors import map_colors
from cellpack.autopack.plotly_result import PlotlyAnalysis
def autolabel(rects, ax):
# from http://matplotlib.org/examples/api/barchart_demo.html
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(
rect.get_x() + rect.get_width() / 2.0,
height / 2.0,
"%d" % int(height),
ha="center",
va="bottom",
)
def autolabelyerr(ax, rects, err=None):
# attach some text labels
for i, rect in enumerate(rects):
height = rect.get_height()
v = "%.2f" % height
y = 0.5 * height
if err is not None:
v = "%.2f" % err[i]
y = 1.05 * height
ax.text(rect.get_x() + rect.get_width() / 2.0, y, v, ha="center", va="bottom")
def autolabels(loci1, loci2, loci3, ax, yerr1, yerr2, yerr3):
# from http://matplotlib.org/examples/api/barchart_demo.html
# attach some text labels
for i in range(len(loci1)): # rects:
rect1 = loci1[i]
rect2 = loci2[i]
rect3 = loci3[i]
height1 = rect1.get_height()
height2 = rect2.get_height()
height3 = rect3.get_height()
ax.text(
rect1.get_x() + rect1.get_width() / 2.0,
height1 / 2.0,
"%2.1f" % (height1 * 100.0),
ha="center",
va="bottom",
color="black",
)
ax.text(
rect2.get_x() + rect2.get_width() / 2.0,
height2 / 2.0 + height1,
"%2.1f" % (height2 * 100.0),
ha="center",
va="bottom",
color="black",
)
ax.text(
rect3.get_x() + rect2.get_width() / 2.0,
height3 / 2.0 + height1 + height2,
"%2.1f" % (height3 * 100.0),
ha="center",
va="bottom",
color="white",
)
ax.text(
rect1.get_x() + rect1.get_width() / 2.0,
1.01 * height1,
"%2.1f" % (yerr1[i] * 100.0),
ha="center",
va="bottom",
color="black",
)
ax.text(
rect2.get_x() + rect2.get_width() / 2.0,
1.01 * (height2 + height1),
"%2.1f" % (yerr2[i] * 100.0),
ha="center",
va="bottom",
color="white",
)
ax.text(
rect3.get_x() + rect2.get_width() / 2.0,
1.01 * (height3 + height1 + height2),
"%2.1f" % (yerr3[i] * 100.0),
ha="center",
va="bottom",
color="black",
)
def getRndWeighted(listPts, weight, yerr):
w = [yerr[i] * numpy.random.random() + weight[i] for i in range(len(weight))]
t = numpy.cumsum(w)
s = numpy.sum(w)
i = numpy.searchsorted(t, numpy.random.rand(1) * s)[0]
return listPts[i]
class AnalyseAP:
def __init__(self, env=None, viewer=None, result_file=None):
self.env = None
self.smallest = 99999.0
self.largest = 0.0
if env:
self.env = env
self.smallest, self.largest = self.getMinMaxProteinSize()
self.afviewer = viewer
self.helper = None
if viewer:
self.helper = self.afviewer.vi
self.result_file = result_file
self.center = [0, 0, 0]
self.bbox = [[0, 0, 0], [1, 1, 1]]
self.g = GeometryTools()
self.g.Resolution = 1.0 # or grid step?
self.current_pos = None
self.current_distance = None
self.plotly = PlotlyAnalysis()
autopack._colors = None
def getMinMaxProteinSize(self):
smallest = 999999.0
largest = 0.0
for organelle in self.env.compartments:
mini, maxi = organelle.getMinMaxProteinSize()
if mini < smallest:
smallest = mini
if maxi > largest:
largest = maxi
if self.env.exteriorRecipe:
mini, maxi = self.env.exteriorRecipe.getMinMaxProteinSize()
if mini < smallest:
smallest = mini
if maxi > largest:
largest = maxi
return smallest, largest
def getPositionsFromResFile(self):
# could actually restore file using histoVol.
# or not
# need to parse apr file here anyway
return []
def getPositionsFromObject(self, parents):
positions = []
for parent in parents:
obparent = self.helper.getObject(parent)
children = self.helper.getChilds(obparent)
for ch in children:
ingr_name = self.helper.getName(ch)
meshp = self.helper.getObject("Meshs_" + ingr_name.split("_")[0])
if meshp is None:
c = self.helper.getChilds(ch)
if not len(c):
continue
meshp_children = self.helper.getChilds(
c[0]
) # continue #should get sphere/cylnder parent ?
else:
meshp_children = self.helper.getChilds(meshp)
for cc in meshp_children:
pos = self.helper.ToVec(self.helper.getTranslation(cc))
positions.append(pos)
return positions
def getDistanceFrom(self, target, parents=None, **options):
"""
target : name or host object target or target position
parent : name of host parent object for the list of object to measure distance from
objects : list of object or list of points
"""
# get distance from object to the target.
# all object are in h.molecules and orga.molecules
# get options
if type(target) == list or type(target) == tuple:
targetPos = target
elif type(target) == str:
o = self.helper.getObject(target)
if o is not None:
targetPos = self.helper.ToVec(self.helper.getTranslation(o)) # hostForm
else:
o = self.helper.getObject(target)
if o is not None:
targetPos = self.helper.ToVec(self.helper.getTranslation(o)) # hostForm
listCenters = []
if self.result_file is None:
if parents is None and self.result_file is None:
listeParent = [self.env.name + "_cytoplasm"]
for o in self.env.compartments:
listeParent.append(o.name + "_Matrix")
listeParent.append(o.name + "_surface")
elif parents is not None and self.result_file is None:
listeParent = parents
listCenters = self.getPositionsFromObject(listeParent)
delta = numpy.array(listCenters) - numpy.array(targetPos)
delta *= delta
distA = numpy.sqrt(delta.sum(1))
return distA
def getClosestDistance(self, parents=None, **options):
if self.result_file is None:
if parents is None and self.result_file is None:
listeParent = [self.env.name + "_cytoplasm"]
for o in self.env.compartments:
listeParent.append(o.name + "_Matrix")
listeParent.append(o.name + "_surface")
elif parents is not None and self.result_file is None:
listeParent = parents
listeCenters = self.getPositionsFromObject(listeParent)
else:
# use data from file
# TODO: currently getPositionsFromResFile returns an empty list
# listeCenters = self.getPositionsFromResFile(listeParent)
listeCenters = []
# is the distance in the result array ?
listeDistance = numpy.zeros(len(listeCenters)) + 99999
for i in range(len(listeCenters)):
for j in range(i + 1, len(listeCenters)):
# should use point
d = self.helper.measure_distance(listeCenters[i], listeCenters[j])
if d < listeDistance[i]:
listeDistance[i] = d
return listeDistance
def displayDistance(
self,
ramp_color1=[1, 0, 0],
ramp_color2=[0, 0, 1],
ramp_color3=None,
cutoff=60.0,
):
distances = numpy.array(self.env.grid.distToClosestSurf[:])
mask = distances > cutoff
ind = numpy.nonzero(mask)[0]
distances[ind] = cutoff
mask = distances < 0 # -cutoff
ind = numpy.nonzero(mask)[0]
distances[ind] = 0 # cutoff
base = self.helper.getObject(self.env.name + "distances_base")
if base is None:
base = self.helper.Sphere(self.env.name + "distances_base")[0]
p = self.helper.getObject(self.env.name + "distances")
if p is not None:
self.helper.deleteObject(p) # recursif?
p = self.helper.newEmpty(self.env.name + "distances")
# can use cube also
def displayDistanceCube(
self,
ramp_color1=[1, 0, 0],
ramp_color2=[0, 0, 1],
ramp_color3=None,
cutoff=60.0,
):
distances = numpy.array(self.env.grid.distToClosestSurf[:])
mask = distances > cutoff
ind = numpy.nonzero(mask)[0]
distances[ind] = cutoff
mask = distances < 0 # -cutoff
ind = numpy.nonzero(mask)[0]
distances[ind] = 0 # cutoff
base = self.helper.getObject(self.env.name + "distances_base_cube")
if base is None:
# base=self.helper.Sphere(self.env.name+"distances_base")[0]
size = self.env.grid.gridSpacing
base = self.helper.box(
self.env.name + "distances_base_cube",
center=[0.0, 0.0, 0.0],
size=[size, size, size],
)[0]
parent_cube = self.helper.getObject(self.env.name + "distances_cubes")
if parent_cube is not None:
self.helper.deleteObject(parent_cube) # recursif?
parent_cube = self.helper.newEmpty(self.env.name + "distances_cubes")
def displayDistancePlane(
self,
ramp_color1=[1, 0, 0],
ramp_color2=[0, 0, 1],
ramp_color3=None,
cutoff=60.0,
):
# which axis ?
distances = numpy.array(self.env.grid.distToClosestSurf[:])
ramp = col.getRamp([ramp_color1, ramp_color2], size=255) # color
mask = distances > cutoff
ind = numpy.nonzero(mask)[0]
distances[ind] = cutoff
mask = distances < 0 # -cutoff
ind = numpy.nonzero(mask)[0]
distances[ind] = 0 # cutoff
newd = numpy.append(distances, cutoff)
colors = map_colors(newd, ramp)[:-1] # 1D array of the grid x,y,1
autopack._colors = colors
p = self.helper.getObject(self.env.name + "distances")
if p is not None:
self.helper.deleteObject(p) # recursif?
p = self.helper.newEmpty(self.env.name + "distances_p")
d = numpy.array(self.env.grid.boundingBox[0]) - numpy.array(
self.env.grid.boundingBox[1]
)
p, mpl = self.helper.plane(
self.env.name + "distances_plane",
center=self.env.grid.getCenter(),
size=[math.fabs(d[0]), math.fabs(d[1])],
parent=p,
)
self.helper.rotateObj(p, [0, 0, -math.pi / 2.0])
filename = (
autopack.cache_results
+ os.sep
+ self.env.name
+ "distances_plane_texture.png"
)
c = colors.reshape(
(
self.env.grid.nbGridPoints[0],
self.env.grid.nbGridPoints[1],
self.env.grid.nbGridPoints[2],
3,
)
)
        im = Image.frombytes(
            "RGB", (c.shape[0], c.shape[1]), numpy.uint8(c * 255.0).tobytes()
        )
im.save(str(filename))
mat = self.helper.createTexturedMaterial(
self.env.name + "planeMat", str(filename)
)
# assign the material to the plane
self.helper.assignMaterial(p, mat, texture=True)
def writeJSON(self, filename, data):
with open(filename, "w") as fp: # doesnt work with symbol link ?
json.dump(
data, fp, indent=4, separators=(",", ": ")
) # ,indent=4, separators=(',', ': ')
def loadJSON(self, filename):
with open(filename) as data_file:
data = json.load(data_file)
return data
def grabResultFromJSON(self, n):
ingrrot = {}
ingrpos = {}
for i in range(n):
with open("results_seed_" + str(i) + ".json") as data_file:
data = json.load(data_file)
for recipe in data:
for ingrname in data[recipe]:
for k in range(len(data[recipe][ingrname]["results"])):
if ingrname not in ingrrot:
ingrrot[ingrname] = []
ingrpos[ingrname] = []
ingrrot[ingrname].append(
data[recipe][ingrname]["results"][k][1]
)
ingrpos[ingrname].append(
data[recipe][ingrname]["results"][k][0]
)
return ingrpos, ingrrot
def grabResultFromTXT(self, n, doanalyze=False):
from autopack import transformation as t
ingrrot = {}
ingrpos = {}
for i in range(1000):
files = open("results_seed_" + str(i) + ".txt", "r")
lines = files.readlines()
files.close()
for line in lines:
line = line.replace("<", " ").replace(">", " ")
elem = line.split()
ingrname = elem[-5]
if ingrname not in ingrrot:
ingrrot[ingrname] = []
ingrpos[ingrname] = []
ingrrot[ingrname].append(eval(elem[2]))
ingrpos[ingrname].append(eval(elem[0]))
for ingrname in ingrrot:
ingrrot[ingrname] = [
numpy.array(m).reshape((4, 4)) for m in ingrrot[ingrname]
]
if doanalyze:
for ingrname in ingrrot:
eulers3 = [t.euler_from_matrix(m, "rxyz") for m in ingrrot[ingrname]]
e3 = numpy.degrees(numpy.array(eulers3)).transpose()
numpy.savetxt(
ingrname + "_euler_X.csv", numpy.array(e3[0]), delimiter=","
)
numpy.savetxt(
ingrname + "_euler_Y.csv", numpy.array(e3[1]), delimiter=","
)
numpy.savetxt(
ingrname + "_euler_Z.csv", numpy.array(e3[2]), delimiter=","
)
self.histo(e3[0], ingrname + "_euler_X.png", bins=12, size=max(e3[0]))
self.histo(e3[1], ingrname + "_euler_Y.png", bins=12, size=max(e3[1]))
self.histo(e3[2], ingrname + "_euler_Z.png", bins=12, size=max(e3[2]))
# ingrpositions,distA,angles3=self.getDistanceAngle(ingrpos3, ingrrot3)
# numpy.savetxt(ingrname+"_angle_X.csv", numpy.array(angles3[1]), delimiter=",")
# numpy.savetxt(ingrname+"_angle_Y.csv", numpy.array(angles3[2]), delimiter=",")
# numpy.savetxt(ingrname+"_angle_Z.csv", numpy.array(angles3[3]), delimiter=",")
# self.histo(angles3[1],ingrname+"_angle_X.png",bins=12,size=max(angles3[1]))
# self.histo(angles3[2],ingrname+"_angle_Y.png",bins=12,size=max(angles3[2]))
# self.histo(angles3[3],ingrname+"_angle_Z.png",bins=12,size=max(angles3[3]))
return ingrpos, ingrrot
# should take any type of list...
def save_csv(self, data, filename=None):
if filename is None:
filename = "output.csv"
resultFile = open(filename, "wb")
wr = csv.writer(resultFile, dialect="excel")
# wr.writerows(data) list of list ?
# resultFile.close()
for item in data:
wr.writerow([item])
resultFile.close()
def rectangle_circle_area(self, bbox, center, radius):
# http://www.eex-dev.net/index.php?id=100
# [[0.,0,0],[1000,1000,1]]
# top,bottom, right, left
# rect=Rectangle(bbox[0][0],bbox[1][0],bbox[0][1],bbox[1][1])#top,bottom, right, left
rect = Rectangle(
bbox[1][1], bbox[0][1], bbox[1][0], bbox[0][0]
) # top,bottom, right, left
m = [center[0], center[1]]
r = radius
area = math.pi * r ** 2
chs = self.g.check_sphere_inside(rect, m, r)
if chs: # sph not completly inside
ch = self.g.check_rectangle_oustide(rect, m, r)
if ch: # rectangle not outside
leftBound, rightBound = self.g.getBoundary(rect, m, r)
area = self.g.get_rectangle_cercle_area(
rect, m, r, leftBound, rightBound
)
# print area,leftBound,rightBound
else:
area = bbox[0][1] ** 2
return area
def getAxeValue(self, ingrname, axe=0):
ingrpositions = [
self.env.molecules[i][0][axe]
for i in range(len(self.env.molecules))
if self.env.molecules[i][2].name == ingrname
]
return ingrpositions
def getAxesValues(self, positions):
pp = numpy.array(positions).transpose()
if len(positions) == 0:
return 1, 1, 1
px = pp[0]
py = pp[1]
pz = pp[2]
return px, py, pz
def getDistance(self, ingrname, center):
distA = []
ingrpositions = [
self.env.molecules[i][0]
for i in range(len(self.env.molecules))
if self.env.molecules[i][2].name == ingrname
]
ingrpositions = numpy.array(ingrpositions)
if len(ingrpositions):
delta = numpy.array(ingrpositions) - numpy.array(center)
delta *= delta
distA = numpy.sqrt(delta.sum(1)).tolist()
return ingrpositions, distA
def getDistanceAngle(self, ingr, center):
# need matrix to euler? then access and plot them?
# also check the measure angle one
angles = []
distA = []
ingr_positions = [
self.env.molecules[i][0]
for i in range(len(self.env.molecules))
if self.env.molecules[i][2].name == ingr.name
]
ingr_positions = numpy.array(ingr_positions)
ingr_rotation = [
self.env.molecules[i][1]
for i in range(len(self.env.molecules))
if self.env.molecules[i][2].name == ingr.name
]
ingr_rotation = numpy.array(ingr_rotation)
if len(ingr_positions):
delta = numpy.array(ingr_positions) - numpy.array(center)
# lets do it on X,Y,Z and also per positions ?
anglesX = numpy.array(
signed_angle_between_vectors(
[[0, 0, 1]] * len(ingr_positions),
ingr_rotation[:, 0, :3],
-delta,
directed=False,
axis=1,
)
)
anglesY = numpy.array(
signed_angle_between_vectors(
[[0, 1, 0]] * len(ingr_positions),
ingr_rotation[:, 1, :3],
-delta,
directed=False,
axis=1,
)
)
anglesZ = numpy.array(
signed_angle_between_vectors(
[[1, 0, 0]] * len(ingr_positions),
ingr_rotation[:, 2, :3],
-delta,
directed=False,
axis=1,
)
)
delta *= delta
distA = numpy.sqrt(delta.sum(1)).tolist()
angles = numpy.array([distA, anglesX, anglesY, anglesZ])
return ingr_positions, distA, numpy.degrees(angles)
def getVolumeShell(self, bbox, radii, center):
# rectangle_circle_area
volumes = []
box_size0 = bbox[1][0] - bbox[0][0]
for i in range(len(radii) - 1):
r1 = radii[i]
r2 = radii[i + 1]
v1 = self.g.calc_volume(r1, box_size0 / 2.0)
v2 = self.g.calc_volume(r2, box_size0 / 2.0)
# if v1 == 0 or v2 == 0 :
# volumes.append((4./3.)*numpy.pi*(numpy.power(r2,3)-numpy.power(r1, 3)))
# else :
volumes.append(v2 - v1)
return volumes
def rdf_3d(self, ingr):
# see for intersection volume here http://crowsandcats.blogspot.com/2013/04/cube-sphere-intersection-volume.html
# and here http://crowsandcats.blogspot.com/2013/05/extending-radial-distributions.html
# will require scipy...worth it ?
# should be pairewise distance ? or not ?
distances = numpy.array(self.env.distances[ingr.name])
basename = self.env.basename
numpy.savetxt(
basename + ingr.name + "_pos.csv",
numpy.array(self.env.ingrpositions[ingr.name]),
delimiter=",",
)
self.histo(distances, basename + ingr.name + "_histo.png")
numpy.savetxt(
basename + ingr.name + "_distances.csv",
numpy.array(distances),
delimiter=",",
)
# the bin should be not less than the biggest ingredient radius
# b=int(distances.max()/self.largest)
b = 100
# bin_edges = numpy.arange(0, min(box_size) / 2, bin_width)
new_rdf, edges = numpy.histogramdd(
distances, bins=b, range=[(distances.min(), distances.max())]
)
radii = edges[0]
# from http://isaacs.sourceforge.net/phys/rdfs.html
dnr = new_rdf
N = len(distances)
V = (
self.env.grid.nbGridPoints[0]
* self.env.grid.nbGridPoints[1]
* self.env.grid.nbGridPoints[2]
* self.env.grid.gridSpacing ** 3
)
Vshell = numpy.array(self.getVolumeShell(self.bbox, radii, self.center))
gr = (dnr * V) / (N * Vshell)
numpy.savetxt(basename + ingr.name + "_rdf.csv", numpy.array(gr), delimiter=",")
self.plot(gr, radii[:-1], basename + ingr.name + "_rdf.png")
def getAreaShell(self, bbox, radii, center):
# rectangle_circle_area
areas = []
for i in range(len(radii) - 1):
r1 = radii[i]
r2 = radii[i + 1]
area1 = self.rectangle_circle_area(bbox, center, r1)
area2 = self.rectangle_circle_area(bbox, center, r2)
if area1 == 0 or area2 == 0:
areas.append(numpy.pi * (numpy.power(r2, 2) - numpy.power(r1, 2)))
else:
areas.append(area2 - area1)
return areas
def ripley(self, positions, dr=25, rMax=None):
# K(t) = A*SUM(wij*I(i,j)/n**2)
# lambda = n/A A is the area of the region containing all points
# I indicator function 1 if its operand is true, 0 otherwise
# t is the search radius
# if homogenous K(s) = pi*s**2
# L(t) = (K(t)/pi)**1/2
# A common plot is a graph of t - \hat{L}(t) against t
# which will approximately follow the horizontal zero-axis with constant
# dispersion if the data follow a homogeneous Poisson process.
N = len(positions)
V = 1000 ** 2
diag = numpy.sqrt(1000 ** 2 + 1000 ** 2)
dr = dr # all_distance.min()
if rMax is None:
rMax = diag
edges = numpy.arange(dr, rMax + 1.1 * dr, dr)
k = numpy.zeros((N, len(edges)))
for i, p in enumerate(positions):
di = scipy.spatial.distance.cdist(
positions,
[p],
"euclidean",
)
# dV = np.array(analyse.getAreaShell(analyse.bbox,edges,p))
for j, e in enumerate(edges):
area0 = math.pi * e ** 2 # complete circle
area1 = self.rectangle_circle_area(self.bbox, p, e)
w = area1 / area0
k[i, j] = w * len(numpy.nonzero(di < e)[0]) / N ** 2
Kt = V * numpy.sum(k, axis=0)
Lt = (Kt / numpy.pi) ** 0.5
return Kt, Lt
# pos=numpy.array(self.env.ingrpositions[ingr.name])#np.array([np.array(p[0]) for p in h.molecules])
def rdf(self, positions, dr=10, rMax=None):
N = len(positions)
V = 1000 ** 2
diag = numpy.sqrt(1000 ** 2 + 1000 ** 2)
dr = dr # all_distance.min()
if rMax is None:
rMax = diag
edges = numpy.arange(0.0, rMax + 1.1 * dr, dr)
g = numpy.zeros((N, len(edges) - 1))
dv = []
density = float(N) / float(V)
for i, p in enumerate(positions):
di = scipy.spatial.distance.cdist(
positions,
[p],
"euclidean",
)
dN, bins = numpy.histogram(di, bins=edges)
dV = numpy.array(self.getAreaShell(self.bbox, edges, p))
dv.append(dV)
g[i] = dN / (dV * density)
avg = numpy.average(g, axis=0) # /np.array(dv)
return avg
def rdf_2d(self, ingr):
# dN/N / dV/V = dN/dV * V/N
distances = numpy.array(self.env.distances[ingr.name])
basename = self.env.basename
numpy.savetxt(
basename + ingr.name + "_pos.csv",
numpy.array(self.env.ingrpositions[ingr.name]),
delimiter=",",
)
self.histo(distances, basename + ingr.name + "_histo.png")
numpy.savetxt(
basename + ingr.name + "_distances.csv",
numpy.array(distances),
delimiter=",",
)
# the bin should be not less than the biggest ingredient radius
# b=int(distances.max()/self.largest)
new_rdf, edges = numpy.histogramdd(
distances
) # , bins=b, range=[(distances.min(), distances.max())],normed=0)
radii = edges[0]
# r=radii.tolist()
# r.insert(0,0.0)
# radii = numpy.array(r)
# rdf= new_rdf.tolist()
# rdf.insert(0,0)
# new_rdf = numpy.array(rdf)
# from http://isaacs.sourceforge.net/phys/rdfs.html
dnr = new_rdf[:]
N = len(distances)
V = (
self.env.grid.nbGridPoints[0]
* self.env.grid.nbGridPoints[1]
* self.env.grid.gridSpacing ** 2
)
Vshell = numpy.array(self.getAreaShell(self.bbox, radii, self.center))
# print Vshell
# Vshell1 = numpy.pi*density*(numpy.power(radii[1:],2)-numpy.power(radii[:-1], 2))
# print Vshell1
# print radii
gr = (dnr * V) / (N * Vshell)
numpy.savetxt(basename + ingr.name + "_rdf.csv", numpy.array(gr), delimiter=",")
self.plot(gr, radii[:-1], basename + ingr.name + "_rdf.png")
# simpl approach Ni/Areai
G = dnr / Vshell
numpy.savetxt(
basename + ingr.name + "_rdf_simple.csv", numpy.array(G), delimiter=","
)
self.plot(numpy.array(G), radii[:-1], basename + ingr.name + "_rdf_simple.png")
def axis_distribution_total(self, all_positions):
basename = self.env.basename
numpy.savetxt(
basename + "total_pos.csv", numpy.array(all_positions), delimiter=","
)
px, py, pz = self.getAxesValues(all_positions)
# m1=numpy.nonzero( numpy.logical_and(
# numpy.greater_equal(px, 0.), numpy.less_equal(px, 1000.0)))
# m2=numpy.nonzero( numpy.logical_and(
# numpy.greater_equal(py, 0.), numpy.less_equal(py, 1000.0)))
self.histo(px, basename + "total_histo_X.png", bins=10)
self.histo(py, basename + "total_histo_Y.png", bins=10)
self.histo(pz, basename + "total_histo_Z.png", bins=10)
def axis_distribution(self, ingr):
basename = self.env.basename
px, py, pz = self.getAxesValues(self.env.ingrpositions[ingr.name])
self.histo(px, basename + ingr.name + "_histo_X.png", bins=10)
self.histo(py, basename + ingr.name + "_histo_Y.png", bins=10)
self.histo(pz, basename + ingr.name + "_histo_Z.png", bins=10)
# do it for all ingredient cumulate?
def occurence_distribution(self, ingr):
basename = self.env.basename
occ = self.env.occurences[ingr.name]
self.simpleplot(range(len(occ)), occ, basename + ingr.name + "_occurence.png")
def correlation(self, ingr):
basename = self.env.basename
posxyz = numpy.array(self.env.ingrpositions[ingr.name]).transpose()
g_average, radii, x, y, z = self.PairCorrelationFunction_3D(
posxyz, 1000, 900, 100
)
self.plot(g_average, radii, basename + ingr.name + "_corr.png")
def PairCorrelationFunction_3D(self, data, S, rMax, dr):
"""Compute the three-dimensional pair correlation function for a set of
spherical particles contained in a cube with side length S. This simple
function finds reference particles such that a sphere of radius rMax drawn
around the particle will fit entirely within the cube, eliminating the need
to compensate for edge effects. If no such particles exist, an error is
returned. Try a smaller rMax...or write some code to handle edge effects! ;)
Arguments:
x an array of x positions of centers of particles
y an array of y positions of centers of particles
z an array of z positions of centers of particles
S length of each side of the cube in space
rMax outer diameter of largest spherical shell
dr increment for increasing radius of spherical shell
Returns a tuple: (g, radii, interior_x, interior_y, interior_z)
g(r) a numpy array containing the correlation function g(r)
radii a numpy array containing the radii of the
spherical shells used to compute g(r)
interior_x x coordinates of reference particles
interior_y y coordinates of reference particles
interior_z z coordinates of reference particles
"""
from numpy import zeros, sqrt, where, pi, average, arange, histogram
x = data[0]
y = data[1]
z = data[2]
# Find particles which are close enough to the cube center that a sphere of radius
# rMax will not cross any face of the cube
bools1 = x > rMax
bools2 = x < (S - rMax)
bools3 = y > rMax
bools4 = y < (S - rMax)
bools5 = z > rMax
bools6 = z < (S - rMax)
(interior_indices,) = where(bools1 * bools2 * bools3 * bools4 * bools5 * bools6)
num_interior_particles = len(interior_indices)
if num_interior_particles < 1:
raise RuntimeError(
"No particles found for which a sphere of radius rMax\
will lie entirely within a cube of side length S. Decrease rMax\
or increase the size of the cube."
)
edges = arange(0.0, rMax + 1.1 * dr, dr)
num_increments = len(edges) - 1
g = zeros([num_interior_particles, num_increments])
radii = zeros(num_increments)
numberDensity = len(x) / S ** 3
# Compute pairwise correlation for each interior particle
for p in range(num_interior_particles):
index = interior_indices[p]
d = sqrt((x[index] - x) ** 2 + (y[index] - y) ** 2 + (z[index] - z) ** 2)
d[index] = 2 * rMax
            (result, bins) = histogram(d, bins=edges, density=False)
g[p, :] = result / numberDensity
# Average g(r) for all interior particles and compute radii
g_average = zeros(num_increments)
for i in range(num_increments):
radii[i] = (edges[i] + edges[i + 1]) / 2.0
rOuter = edges[i + 1]
rInner = edges[i]
g_average[i] = average(g[:, i]) / (
4.0 / 3.0 * pi * (rOuter ** 3 - rInner ** 3)
)
return (
g_average,
radii,
x[interior_indices],
y[interior_indices],
z[interior_indices],
)
# Number of particles in shell/total number of particles/volume of shell/number density
# shell volume = 4/3*pi(r_outer**3-r_inner**3)
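    # Usage sketch (hypothetical values): for positions packed in a 1000-unit cube,
    #   data = numpy.array([x, y, z])
    #   g, radii, ix, iy, iz = self.PairCorrelationFunction_3D(data, S=1000.0, rMax=100.0, dr=5.0)
    # rMax must be small enough that a sphere of that radius around at least one particle
    # fits entirely inside the cube, otherwise the RuntimeError above is raised.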
def PairCorrelationFunction_2D(self, x, y, S, rMax, dr):
"""Compute the two-dimensional pair correlation function, also known
as the radial distribution function, for a set of circular particles
contained in a square region of a plane. This simple function finds
reference particles such that a circle of radius rMax drawn around the
particle will fit entirely within the square, eliminating the need to
compensate for edge effects. If no such particles exist, an error is
returned. Try a smaller rMax...or write some code to handle edge effects! ;)
Arguments:
x an array of x positions of centers of particles
y an array of y positions of centers of particles
S length of each side of the square region of the plane
        rMax            outer radius of the largest annulus
dr increment for increasing radius of annulus
Returns a tuple: (g, radii, interior_x, interior_y)
g(r) a numpy array containing the correlation function g(r)
radii a numpy array containing the radii of the
annuli used to compute g(r)
interior_x x coordinates of reference particles
interior_y y coordinates of reference particles
"""
from numpy import zeros, sqrt, where, pi, average, arange, histogram
# Number of particles in ring/area of ring/number of reference particles/number density
# area of ring = pi*(r_outer**2 - r_inner**2)
# Find particles which are close enough to the box center that a circle of radius
# rMax will not cross any edge of the box
bools1 = x > 1.1 * rMax
bools2 = x < (S - 1.1 * rMax)
bools3 = y > rMax * 1.1
bools4 = y < (S - rMax * 1.1)
(interior_indices,) = where(bools1 * bools2 * bools3 * bools4)
num_interior_particles = len(interior_indices)
if num_interior_particles < 1:
raise RuntimeError(
"No particles found for which a circle of radius rMax\
will lie entirely within a square of side length S. Decrease rMax\
or increase the size of the square."
)
edges = arange(0.0, rMax + 1.1 * dr, dr)
num_increments = len(edges) - 1
g = zeros([num_interior_particles, num_increments])
radii = zeros(num_increments)
numberDensity = len(x) / S ** 2
# Compute pairwise correlation for each interior particle
for p in range(num_interior_particles):
index = interior_indices[p]
d = sqrt((x[index] - x) ** 2 + (y[index] - y) ** 2)
d[index] = 2 * rMax
            (result, bins) = histogram(d, bins=edges, density=False)
g[p, :] = result / numberDensity
# Average g(r) for all interior particles and compute radii
g_average = zeros(num_increments)
for i in range(num_increments):
radii[i] = (edges[i] + edges[i + 1]) / 2.0
rOuter = edges[i + 1]
rInner = edges[i]
# divide by the area of sphere cut by sqyare
g_average[i] = average(g[:, i]) / (pi * (rOuter ** 2 - rInner ** 2))
return (g_average, radii, interior_indices)
def histo(self, distances, filename, bins=100, size=1000.0):
pylab.clf()
        mu, sigma = numpy.mean(distances), numpy.std(distances)  # kept for the optional best-fit overlay below
# the histogram of the data
# b=numpy.arange(distances.min(), distances.max(), 2)
# n, bins, patches = pyplot.hist(distances, bins=bins, normed=1, facecolor='green')#, alpha=0.75)
y, binEdges = numpy.histogram(distances, bins=bins)
bincenters = 0.5 * (binEdges[1:] + binEdges[:-1])
        menStd = numpy.sqrt(y)  # Poisson (sqrt(N)) error bars on the counts
        width = binEdges[1] - binEdges[0]  # bar width should match the bin width, not the bin count
        pyplot.bar(bincenters, y, width=width, color="r", yerr=menStd)
# add a 'best fit' line?
# y = mlab.normpdf( bins, mu, sigma)#should be the excepted distribution
# l = pyplot.plot(bins, y, 'r--', linewidth=3)
pyplot.savefig(filename)
# pylab.close() # closes the current figure
def plot(self, rdf, radii, file_name):
pylab.clf()
matplotlib.rc("font", size=14)
matplotlib.rc("figure", figsize=(5, 4))
# pylab.clf()
pylab.plot(radii, rdf, linewidth=3)
pylab.xlabel(r"distance $r$ in $\AA$")
pylab.ylabel(r"radial distribution function $g(r)$")
pylab.savefig(file_name)
    def simpleplot(self, X, Y, filename, w=3):
        pylab.clf()
        pylab.plot(X, Y, linewidth=w)
        pylab.savefig(filename)
def build_grid(
self,
bb,
forceBuild=True,
):
t1 = time()
gridFileIn = None
gridFileOut = None
self.env.buildGrid(
boundingBox=bb,
gridFileIn=gridFileIn,
rebuild=forceBuild,
gridFileOut=gridFileOut,
previousFill=False,
)
t2 = time()
gridTime = t2 - t1
print("time to Build Grid", gridTime)
def pack(
self, seed=20, vTestid=3, vAnalysis=0, fbox_bb=None, show_plotly_plot=True
):
if show_plotly_plot:
self.plotly.update_title(self.env.placeMethod)
t1 = time()
self.env.pack_grid(
seedNum=seed, vTestid=vTestid, vAnalysis=vAnalysis, fbox=fbox_bb
)
t2 = time()
print("time to run pack_grid", self.env.placeMethod, t2 - t1)
print("num placed", len(self.env.molecules))
if show_plotly_plot:
self.plotly.update_title(
f"{self.env.placeMethod} took {str(round(t2 - t1, 2))}s, packed {len(self.env.molecules)}"
)
self.plotly.make_grid_heatmap(self.env)
self.plotly.add_ingredient_positions(self.env)
self.plotly.show()
def calcDistanceMatrixFastEuclidean2(self, nDimPoints):
nDimPoints = numpy.array(nDimPoints)
n, m = nDimPoints.shape
delta = numpy.zeros((n, n), "d")
for d in range(m):
data = nDimPoints[:, d]
delta += (data - data[:, numpy.newaxis]) ** 2
return numpy.sqrt(delta)
def flush(self):
import gc
import pprint
for i in range(2):
print("Collecting %d ..." % i)
n = gc.collect()
print("Unreachable objects:", n)
print("Remaining Garbage:")
pprint.pprint(gc.garbage)
del gc.garbage[:]
            print()
def merge(self, d1, d2, merge=lambda x, y: y):
result = dict(d1)
for k, v in d2.items():
if k in result:
result[k].extend(v)
else:
result[k] = v
return result
def plotNResult2D(self, n, bbox=[[0.0, 0, 0.0], [1000.0, 1000.0, 1000.0]]):
for i in range(n):
f = "results_seed_" + str(i) + ".json"
self.plot_one_result_2d(filename=f, bbox=bbox)
def plot_one_result_2d(
self, data=None, filename=None, bbox=[[0.0, 0, 0.0], [1000.0, 1000.0, 1000.0]]
):
if data is None and filename is None:
return
elif data is None and filename is not None:
with open(filename) as data_file:
data = json.load(data_file)
fig = pyplot.figure()
ax = fig.add_subplot(111)
radius = {}
ingrrot = {}
ingrpos = {}
for recipe in data:
for ingrname in data[recipe]:
for k in range(len(data[recipe][ingrname]["results"])):
if ingrname not in ingrrot:
ingrrot[ingrname] = []
ingrpos[ingrname] = []
radius[ingrname] = data[recipe][ingrname]["encapsulatingRadius"]
ingrrot[ingrname].append(data[recipe][ingrname]["results"][k][1])
ingrpos[ingrname].append(data[recipe][ingrname]["results"][k][0])
for ingr in ingrpos:
for i, p in enumerate(ingrpos[ingr]):
ax.add_patch(
Circle(
(p[0], p[1]), radius[ingr], edgecolor="black", facecolor="red"
)
)
ax.set_aspect(1.0)
pyplot.axhline(y=bbox[0][1], color="k")
pyplot.axhline(y=bbox[1][1], color="k")
pyplot.axvline(x=bbox[0][0], color="k")
pyplot.axvline(x=bbox[1][0], color="k")
pyplot.axis([bbox[0][0], bbox[1][0], bbox[0][1], bbox[1][1]])
pyplot.savefig("plot" + ingr + ".png")
pylab.close() # closes the current figure
return ingrpos
# res=plotOneResult(None,filename="results_seed_8.json")
def plot_one_result_3D(self, filename, width=1000.0):
plt.close("all") # closes the current figure
pos = []
s = []
c = []
for i in range(len(self.env.molecules)):
m = self.env.molecules[i]
pos.append(numpy.array(m[0]).tolist())
s.append(m[2].encapsulatingRadius ** 2)
c.append(m[2].color)
fig = plt.figure()
        ax = fig.add_subplot(projection="3d")
x, y, z = numpy.array(pos).transpose()
ax.scatter(x, y, z, s=s, c=c)
ax.legend()
ax.set_xlim3d(0, width)
ax.set_ylim3d(0, width)
ax.set_zlim3d(0, width)
plt.savefig(filename)
return x, y, z, s, c
def one_exp(self, seed, output_path, eid=0, nmol=1, periodicity=True, dim=2):
output = output_path + str(nmol)
if periodicity:
self.env.use_periodicity = True
autopack.testPeriodicity = True
else:
self.env.use_periodicity = False
autopack.testPeriodicity = False
if dim == 3:
autopack.biasedPeriodicity = [1, 1, 1]
else:
autopack.biasedPeriodicity = [1, 1, 0]
if not os.path.exists(output):
os.makedirs(output)
def getHaltonUnique(self, n):
seeds_f = numpy.array(halton(int(n * 1.5))) * int(n * 1.5)
        seeds_int = numpy.array(numpy.round(seeds_f))
from __future__ import division
import numpy as np
def fun(xx):
##########################################################################
#
# ROBOT ARM FUNCTION
#
# Author: <NAME>, Colorado School of Mines
# Questions/Comments: Please email <NAME> at <EMAIL>
#
# Copyright 2016, <NAME>, Colorado School of Mines
#
# THERE IS NO WARRANTY, EXPRESS OR IMPLIED. WE DO NOT ASSUME ANY LIABILITY
# FOR THE USE OF THIS SOFTWARE. If software is modified to produce
# derivative works, such modified software should be clearly marked.
# Additionally, this program is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; version 2.0 of the License.
# Accordingly, this program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# For function details and reference information, see:
# http://www.sfu.ca/~ssurjano/
#
##########################################################################
#
# OUTPUT AND INPUTS:
#
# y = distance from the end of the arm to the origin
# xx = [theta1, theta2, theta3, theta4, L1, L2, L3, L4]
#
#########################################################################
# Shift and scale inputs from [-1,1] hypercube to describe ranges
pi = np.pi
b = pi/2
a = -pi/2
theta = (xx[0:4]+1)*(b-a)*0.5+a
L = (xx[4:8]+1)*0.5+a
L1 = L[0]
L2 = L[1]
L3 = L[2]
L4 = L[3]
T1 = theta[0]
T2 = theta[1]
T3 = theta[2]
T4 = theta[3]
u = L1*np.cos(T1)+L2*np.cos(T1+T2)+L3*np.cos(T1+T2+T3)+L4*np.cos(T1+T2+T3+T4)
v = L1*np.sin(T1)+L2*np.sin(T1+T2)+L3*np.sin(T1+T2+T3)+L4*np.sin(T1+T2+T3+T4)
    f = (u**2 + v**2)**0.5
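    # f is the distance from the arm tip (u, v) to the origin. The array below appears to
    # hold the analytic partial derivatives of f with respect to the joint angles: the
    # theta1 component is replaced by the small constant 1e-12, with its full expression
    # kept in the commented block inside the array.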
out = np.array([
#(1/2)*(2*(L1*np.cos(T1)+L2*np.cos(T1+T2)+L3*np.cos(T1+T2+T3)+L4*np.cos( \
#T1+T2+T3+T4))*((-1)*L1*np.sin(T1)+(-1)*L2*np.sin(T1+T2)+(-1)*L3* \
#np.sin(T1+T2+T3)+(-1)*L4*np.sin(T1+T2+T3+T4))+2*(L1*np.cos(T1)+L2*np.cos( \
#T1+T2)+L3*np.cos(T1+T2+T3)+L4*np.cos(T1+T2+T3+T4))*(L1*np.sin(T1)+L2* \
#np.sin(T1+T2)+L3*np.sin(T1+T2+T3)+L4*np.sin(T1+T2+T3+T4)))*((L1*np.cos(T1) \
#+L2*np.cos(T1+T2)+L3*np.cos(T1+T2+T3)+L4*np.cos(T1+T2+T3+T4))**2+(L1* \
#np.sin(T1)+L2*np.sin(T1+T2)+L3*np.sin(T1+T2+T3)+L4*np.sin(T1+T2+T3+T4))**2) \
#**(-1/2),
1e-12,
(1/2)*(2*(L1*np.cos(T1)+L2*np.cos(T1+T2)+L3*np.cos(T1+T2+T3)+L4*np.cos( \
T1+T2+T3+T4))*((-1)*L2*np.sin(T1+T2)+(-1)*L3*np.sin(T1+T2+T3)+(-1) \
*L4*np.sin(T1+T2+T3+T4))+2*(L2*np.cos(T1+T2)+L3*np.cos(T1+T2+T3)+L4* \
    np.cos(T1+T2+T3+T4))*(L1*np.sin(T1)+L2*np.sin(T1+T2)+L3*np.sin(T1+T2+T3)
import os
import cv2
import math
import numpy as np
import tensorflow as tf
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.python.training import moving_averages
from PIL import Image, ImageDraw
################### Mask ###################
def random_bbox(img_height=256, img_width=256, margins=0, mask_size=128, random_mask=True):
"""Generate a random tlhw with configuration.
Args:
img_height: height of image.
img_width: width of image.
margins: margins of mask and image border.
mask_size: size of mask.
random_mask: if True, random location. if False, central location.
Returns:
tuple: (top, left, height, width)
"""
if random_mask is True:
maxt = img_height - margins - mask_size
maxl = img_width - margins - mask_size
t = tf.random_uniform(
[], minval=margins, maxval=maxt, dtype=tf.int32)
l = tf.random_uniform(
[], minval=margins, maxval=maxl, dtype=tf.int32)
else:
t = (img_height - mask_size)//2
l = (img_width - mask_size)//2
h = tf.constant(mask_size)
w = tf.constant(mask_size)
return (t, l, h, w)
def bbox2mask(bbox, img_height=256, img_width=256, max_delta=32, name='mask'):
"""Generate mask tensor from bbox.
Args:
bbox: configuration tuple, (top, left, height, width)
img_height: height of image.
img_width: width of image.
max_delta: max delta of masks.
name: name of variable scope.
Returns:
tf.Tensor: output with shape [1, H, W, 1]
"""
def npmask(bbox, height, width, delta):
mask = np.zeros((1, height, width, 1), np.float32)
h = np.random.randint(delta//2+1)
w = np.random.randint(delta//2+1)
mask[:, bbox[0]+h:bbox[0]+bbox[2]-h,
bbox[1]+w:bbox[1]+bbox[3]-w, :] = 1.
return mask
with tf.variable_scope(name), tf.device('/cpu:0'):
mask = tf.py_func(
npmask,
[bbox, img_height, img_width, max_delta],
tf.float32, stateful=False)
mask.set_shape([1] + [img_height, img_width] + [1])
return mask
def brush_stroke_mask(img_height=256, img_width=256, name='mask'):
"""Generate free form mask tensor.
Returns:
tf.Tensor: output with shape [1, H, W, 1]
"""
min_num_vertex = 4
max_num_vertex = 12
mean_angle = 2*math.pi / 5
angle_range = 2*math.pi / 15
min_width = 12
max_width = 40
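    # The mask is built from a few random "brush strokes": each stroke is a short random
    # walk of vertices whose turning angles are drawn around mean_angle; the rest of the
    # routine (not shown here) presumably rasterises the polyline with a stroke width
    # between min_width and max_width, in the style of DeepFill free-form masks. The
    # constants above control how complex the strokes are.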
def generate_mask(H, W):
average_radius = math.sqrt(H*H+W*W) / 8
mask = Image.new('L', (W, H), 0)
for _ in range(np.random.randint(1, 4)):
num_vertex = np.random.randint(min_num_vertex, max_num_vertex)
angle_min = mean_angle - np.random.uniform(0, angle_range)
angle_max = mean_angle + np.random.uniform(0, angle_range)
angles = []
vertex = []
for i in range(num_vertex):
if i % 2 == 0:
angles.append(2*math.pi - np.random.uniform(angle_min, angle_max))
else:
angles.append(np.random.uniform(angle_min, angle_max))
h, w = mask.size
            vertex.append((int(np.random.randint(0, w)), int(np.random.randint(0, h))))
"""
gui/average3
~~~~~~~~~~~~~~~~~~~~
Graphical user interface for three-dimensional averaging of particles
:author: <NAME>, 2017-2018
:copyright: Copyright (c) 2017-2018 Jungmann Lab, MPI of Biochemistry
"""
import os.path
import sys
import traceback
import colorsys
import matplotlib.pyplot as plt
import numba
import numpy as np
import scipy
from scipy import signal
from PyQt5 import QtCore, QtGui, QtWidgets
from .. import io, lib, render
from numpy.lib.recfunctions import stack_arrays
from cmath import rect, phase
from tqdm import tqdm
import scipy.ndimage.filters
DEFAULT_OVERSAMPLING = 1.0
INITIAL_REL_MAXIMUM = 2.0
ZOOM = 10 / 7
N_GROUP_COLORS = 8
@numba.jit(nopython=True, nogil=True)
def render_hist(x, y, oversampling, t_min, t_max):
n_pixel = int(np.ceil(oversampling * (t_max - t_min)))
in_view = (x > t_min) & (y > t_min) & (x < t_max) & (y < t_max)
x = x[in_view]
y = y[in_view]
x = oversampling * (x - t_min)
y = oversampling * (y - t_min)
image = np.zeros((n_pixel, n_pixel), dtype=np.float32)
render._fill(image, x, y)
return len(x), image
@numba.jit(nopython=True, nogil=True)
def render_histxyz(a, b, oversampling, a_min, a_max, b_min, b_max):
n_pixel_a = int(np.ceil(oversampling * (a_max - a_min)))
n_pixel_b = int(np.ceil(oversampling * (b_max - b_min)))
in_view = (a > a_min) & (b > b_min) & (a < a_max) & (b < b_max)
a = a[in_view]
b = b[in_view]
a = oversampling * (a - a_min)
b = oversampling * (b - b_min)
image = np.zeros((n_pixel_b, n_pixel_a), dtype=np.float32)
render._fill(image, a, b)
return len(a), image
def rotate_axis(axis, vx, vy, vz, angle, pixelsize):
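    # Rotate the localisation coordinates (vx, vy, vz) by `angle` about the given axis.
    # x and y are in camera pixels while z is in nm, so z is converted with `pixelsize`
    # to keep the rotation geometrically consistent.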
if axis == "z":
vx_rot = np.cos(angle) * vx - np.sin(angle) * vy
vy_rot = np.sin(angle) * vx + np.cos(angle) * vy
vz_rot = vz
elif axis == "y":
vx_rot = np.cos(angle) * vx + np.sin(angle) * np.divide(vz, pixelsize)
vy_rot = vy
vz_rot = -np.sin(angle) * vx * pixelsize + np.cos(angle) * vz
elif axis == "x":
vx_rot = vx
vy_rot = np.cos(angle) * vy - np.sin(angle) * np.divide(vz, pixelsize)
vz_rot = np.sin(angle) * vy * pixelsize + np.cos(angle) * vz
return vx_rot, vy_rot, vz_rot
def compute_xcorr(CF_image_avg, image):
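    # Cross-correlation via the Fourier convolution theorem: CF_image_avg is expected to
    # hold the (conjugated) FFT of the reference image, so multiplying the spectra and
    # taking the inverse FFT yields the correlation map, fftshift-ed to centre zero lag.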
F_image = np.fft.fft2(image)
xcorr = np.fft.fftshift(np.real(np.fft.ifft2((F_image * CF_image_avg))))
return xcorr
class ParametersDialog(QtWidgets.QDialog):
def __init__(self, window):
super().__init__(window)
self.window = window
self.setWindowTitle("Parameters")
self.setModal(False)
grid = QtWidgets.QGridLayout(self)
grid.addWidget(QtWidgets.QLabel("Oversampling:"), 0, 0)
self.oversampling = QtWidgets.QDoubleSpinBox()
self.oversampling.setRange(1, 200)
self.oversampling.setValue(DEFAULT_OVERSAMPLING)
self.oversampling.setDecimals(1)
self.oversampling.setKeyboardTracking(False)
self.oversampling.valueChanged.connect(self.window.updateLayout)
grid.addWidget(self.oversampling, 0, 1)
self.iterations = QtWidgets.QSpinBox()
self.iterations.setRange(1, 1)
self.iterations.setValue(1)
class View(QtWidgets.QLabel):
def __init__(self, window):
super().__init__()
self.window = window
self.setMinimumSize(1, 1)
self.setAlignment(QtCore.Qt.AlignCenter)
self.setAcceptDrops(True)
self._pixmap = None
def dragEnterEvent(self, event):
if event.mimeData().hasUrls():
event.accept()
else:
event.ignore()
def dropEvent(self, event):
urls = event.mimeData().urls()
path = urls[0].toLocalFile()
ext = os.path.splitext(path)[1].lower()
if ext == ".hdf5":
self.open(path)
def resizeEvent(self, event):
if self._pixmap is not None:
self.set_pixmap(self._pixmap)
def set_image(self, image):
cmap = np.uint8(np.round(255 * plt.get_cmap("magma")(np.arange(256))))
image /= image.max()
image = np.minimum(image, 1.0)
image = np.round(255 * image).astype("uint8")
Y, X = image.shape
self._bgra = np.zeros((Y, X, 4), dtype=np.uint8, order="C")
self._bgra[..., 0] = cmap[:, 2][image]
self._bgra[..., 1] = cmap[:, 1][image]
self._bgra[..., 2] = cmap[:, 0][image]
qimage = QtGui.QImage(self._bgra.data, X, Y, QtGui.QImage.Format_RGB32)
self._pixmap = QtGui.QPixmap.fromImage(qimage)
self.set_pixmap(self._pixmap)
def set_pixmap(self, pixmap):
self.setPixmap(
pixmap.scaled(
self.width(),
self.height(),
QtCore.Qt.KeepAspectRatio,
QtCore.Qt.FastTransformation,
)
)
def update_image(self, *args):
oversampling = self.window.parameters_dialog.oversampling.value()
t_min = -self.r
t_max = self.r
N_avg, image_avg = render.render_hist(
self.locs, oversampling, t_min, t_min, t_max, t_max
)
self.set_image(image_avg)
class DatasetDialog(QtWidgets.QDialog):
def __init__(self, window):
super().__init__(window)
self.window = window
self.setWindowTitle("Datasets")
self.setModal(False)
self.layout = QtWidgets.QVBoxLayout()
self.checks = []
self.setLayout(self.layout)
def add_entry(self, path):
c = QtWidgets.QCheckBox(path)
self.layout.addWidget(c)
self.checks.append(c)
self.checks[-1].setChecked(True)
class Window(QtWidgets.QMainWindow):
def __init__(self):
super().__init__()
self.setWindowTitle("Picasso: Average3")
self.resize(1024, 512)
this_directory = os.path.dirname(os.path.realpath(__file__))
icon_path = os.path.join(this_directory, "icons", "average.ico")
icon = QtGui.QIcon(icon_path)
self.setWindowIcon(icon)
self.setAcceptDrops(True)
self.parameters_dialog = ParametersDialog(self)
self.dataset_dialog = DatasetDialog(self)
menu_bar = self.menuBar()
file_menu = menu_bar.addMenu("File")
open_action = file_menu.addAction("Open")
open_action.setShortcut(QtGui.QKeySequence.Open)
open_action.triggered.connect(self.open)
file_menu.addAction(open_action)
save_action = file_menu.addAction("Save")
save_action.setShortcut(QtGui.QKeySequence.Save)
save_action.triggered.connect(self.save)
file_menu.addAction(save_action)
process_menu = menu_bar.addMenu("Process")
parameters_action = process_menu.addAction("Parameters")
parameters_action.setShortcut("Ctrl+P")
parameters_action.triggered.connect(self.parameters_dialog.show)
dataset_action = process_menu.addAction("Datasets")
dataset_action.triggered.connect(self.dataset_dialog.show)
self.status_bar = self.statusBar()
self._pixmap = None
self.locs = []
self.z_state = []
self.group_index = []
self.infos = []
self.locs_paths = []
self._mode = "Zoom"
self._pan = False
self._size_hint = (768, 768)
self.n_locs = 0
self._picks = []
self.index_blocks = []
self._drift = []
# Define DisplaySettingsDialog
self.viewxy = QtWidgets.QLabel("")
self.viewxz = QtWidgets.QLabel("")
self.viewyz = QtWidgets.QLabel("")
self.viewcp = QtWidgets.QLabel("")
minsize = 512
self.viewxy.setFixedWidth(minsize)
self.viewxy.setFixedHeight(minsize)
self.viewxz.setFixedWidth(minsize)
self.viewxz.setFixedHeight(minsize)
self.viewyz.setFixedWidth(minsize)
self.viewyz.setFixedHeight(minsize)
self.viewcp.setFixedWidth(minsize)
self.viewcp.setFixedHeight(minsize)
# Define layout
display_groupbox = QtWidgets.QGroupBox("Display")
displaygrid = QtWidgets.QGridLayout(display_groupbox)
displaygrid.addWidget(QtWidgets.QLabel("XY"), 0, 0)
displaygrid.addWidget(self.viewxy, 1, 0)
displaygrid.addWidget(QtWidgets.QLabel("XZ"), 0, 1)
displaygrid.addWidget(self.viewxz, 1, 1)
displaygrid.addWidget(QtWidgets.QLabel("YZ"), 2, 0)
displaygrid.addWidget(self.viewyz, 3, 0)
displaygrid.addWidget(QtWidgets.QLabel("CP"), 2, 1)
displaygrid.addWidget(self.viewcp, 3, 1)
button_groupbox = QtWidgets.QGroupBox("Buttons")
buttongrid = QtWidgets.QGridLayout(button_groupbox)
rotation_groupbox = QtWidgets.QGroupBox("Rotation + Translation")
rotationgrid = QtWidgets.QGridLayout(rotation_groupbox)
centerofmassbtn = QtWidgets.QPushButton("Center of Mass XYZ")
axis_groupbox = QtWidgets.QGroupBox("Axis")
axisgrid = QtWidgets.QGridLayout(axis_groupbox)
self.x_axisbtn = QtWidgets.QRadioButton("X")
self.y_axisbtn = QtWidgets.QRadioButton("Y")
self.z_axisbtn = QtWidgets.QRadioButton("Z")
self.z_axisbtn.setChecked(True)
axisgrid.addWidget(self.x_axisbtn, 0, 0)
axisgrid.addWidget(self.y_axisbtn, 0, 1)
axisgrid.addWidget(self.z_axisbtn, 0, 2)
proj_groupbox = QtWidgets.QGroupBox("Projection")
projgrid = QtWidgets.QGridLayout(proj_groupbox)
self.xy_projbtn = QtWidgets.QRadioButton("XY")
self.yz_projbtn = QtWidgets.QRadioButton("YZ")
self.xz_projbtn = QtWidgets.QRadioButton("XZ")
self.xy_projbtn.setChecked(True)
projgrid.addWidget(self.xy_projbtn, 0, 0)
projgrid.addWidget(self.yz_projbtn, 0, 1)
projgrid.addWidget(self.xz_projbtn, 0, 2)
rotatebtn = QtWidgets.QPushButton("Rotate")
self.radio_sym = QtWidgets.QRadioButton("x symmetry")
self.symEdit = QtWidgets.QSpinBox()
self.symEdit.setRange(2, 100)
self.symEdit.setValue(8)
self.radio_sym_custom = QtWidgets.QRadioButton("custom symmetry")
self.symcustomEdit = QtWidgets.QLineEdit("90,180,270")
deg_groupbox = QtWidgets.QGroupBox("Degrees")
deggrid = QtWidgets.QGridLayout(deg_groupbox)
self.full_degbtn = QtWidgets.QRadioButton("Full")
self.part_degbtn = QtWidgets.QRadioButton("Part")
        self.degEdit = QtWidgets.QSpinBox()
self.degEdit.setRange(1, 10)
self.degEdit.setValue(5)
deggrid.addWidget(self.full_degbtn, 0, 0)
deggrid.addWidget(self.part_degbtn, 0, 1)
deggrid.addWidget(self.degEdit, 0, 2)
self.full_degbtn.setChecked(True)
# Rotation Groupbox
rotationgrid.addWidget(axis_groupbox, 0, 0, 1, 2)
rotationgrid.addWidget(proj_groupbox, 1, 0, 1, 2)
rotationgrid.addWidget(deg_groupbox, 2, 0, 1, 2)
rotationgrid.addWidget(rotatebtn, 3, 0, 1, 2)
rotationgrid.addWidget(self.symEdit, 4, 0)
rotationgrid.addWidget(self.radio_sym, 4, 1)
rotationgrid.addWidget(self.radio_sym_custom, 5, 0)
rotationgrid.addWidget(self.symcustomEdit, 5, 1)
buttongrid.addWidget(centerofmassbtn, 0, 0)
buttongrid.addWidget(rotation_groupbox, 1, 0)
centerofmassbtn.clicked.connect(self.centerofmass)
rotatebtn.clicked.connect(self.rotate_groups)
self.translatebtn = QtWidgets.QCheckBox("Translate only")
self.flipbtn = QtWidgets.QCheckBox("Consider flipped structures")
self.alignxbtn = QtWidgets.QPushButton("Align X")
self.alignybtn = QtWidgets.QPushButton("Align Y")
self.alignzzbtn = QtWidgets.QPushButton("Align Z_Z")
self.alignzybtn = QtWidgets.QPushButton("Align Z_Y")
self.translatexbtn = QtWidgets.QPushButton("Translate X")
self.translateybtn = QtWidgets.QPushButton("Translate Y")
self.translatezbtn = QtWidgets.QPushButton("Translate Z")
self.rotatexy_convbtn = QtWidgets.QPushButton("Rotate XY - Convolution")
self.scorebtn = QtWidgets.QPushButton("Calculate Score")
operate_groupbox = QtWidgets.QGroupBox("Operate")
operategrid = QtWidgets.QGridLayout(operate_groupbox)
rotationgrid.addWidget(self.translatebtn, 7, 0)
rotationgrid.addWidget(self.flipbtn, 8, 0)
self.x_range = QtWidgets.QLineEdit("-3,3")
rotationgrid.addWidget(QtWidgets.QLabel("x-Range (Px)"), 9, 0)
rotationgrid.addWidget(self.x_range, 9, 1)
self.y_range = QtWidgets.QLineEdit("-3,3")
rotationgrid.addWidget(QtWidgets.QLabel("y-Range (Px)"), 10, 0)
rotationgrid.addWidget(self.y_range, 10, 1)
self.z_range = QtWidgets.QLineEdit("-1000,1000")
rotationgrid.addWidget(QtWidgets.QLabel("z-Range (nm)"), 11, 0)
rotationgrid.addWidget(self.z_range, 11, 1)
self.z_range.textChanged.connect(self.adjust_z)
self.x_range.textChanged.connect(self.adjust_xy)
self.y_range.textChanged.connect(self.adjust_xy)
operategrid.addWidget(self.alignxbtn, 0, 1)
operategrid.addWidget(self.alignybtn, 1, 1)
operategrid.addWidget(self.alignzzbtn, 2, 1)
operategrid.addWidget(self.alignzybtn, 3, 1)
operategrid.addWidget(self.translatexbtn, 0, 0)
operategrid.addWidget(self.translateybtn, 1, 0)
operategrid.addWidget(self.translatezbtn, 2, 0)
operategrid.addWidget(self.rotatexy_convbtn, 4, 0)
operategrid.addWidget(self.scorebtn, 4, 1)
self.rotatexy_convbtn.clicked.connect(self.rotatexy_convolution)
self.alignxbtn.clicked.connect(self.align_x)
self.alignybtn.clicked.connect(self.align_y)
self.alignzzbtn.clicked.connect(self.align_zz)
self.alignzybtn.clicked.connect(self.align_zy)
self.translatexbtn.clicked.connect(self.translate_x)
self.translateybtn.clicked.connect(self.translate_y)
self.translatezbtn.clicked.connect(self.translate_z)
self.scorebtn.clicked.connect(self.calculate_score)
buttongrid.addWidget(operate_groupbox, 2, 0)
self.contrastEdit = QtWidgets.QDoubleSpinBox()
self.contrastEdit.setDecimals(1)
self.contrastEdit.setRange(0, 10)
self.contrastEdit.setValue(0.5)
self.contrastEdit.setSingleStep(0.1)
self.contrastEdit.valueChanged.connect(self.updateLayout)
self.grid = QtWidgets.QGridLayout()
self.grid.addWidget(display_groupbox, 0, 0, 2, 1)
self.grid.addWidget(button_groupbox, 0, 1, 1, 1)
contrast_groupbox = QtWidgets.QGroupBox("Contrast")
contrastgrid = QtWidgets.QGridLayout(contrast_groupbox)
contrastgrid.addWidget(self.contrastEdit)
buttongrid.addWidget(contrast_groupbox)
MODEL_X_DEFAULT = "0,20,40,60,0,20,40,60,0,20,40,60"
MODEL_Y_DEFAULT = "0,20,40,0,20,40,0,20,40,0,20,40"
MODEL_Z_DEFAULT = "0,0,0,0,0,0,0,0,0,0,0,0"
self.modelchk = QtWidgets.QCheckBox("Use Model")
self.model_x = QtWidgets.QLineEdit(MODEL_X_DEFAULT)
self.model_y = QtWidgets.QLineEdit(MODEL_Y_DEFAULT)
self.model_z = QtWidgets.QLineEdit(MODEL_Z_DEFAULT)
self.model_preview_btn = QtWidgets.QPushButton("Preview")
self.model_preview_btn.clicked.connect(self.model_preview)
self.modelblurEdit = QtWidgets.QDoubleSpinBox()
self.modelblurEdit.setDecimals(1)
self.modelblurEdit.setRange(0, 10)
self.modelblurEdit.setValue(0.5)
self.modelblurEdit.setSingleStep(0.1)
self.pixelsizeEdit = QtWidgets.QSpinBox()
self.pixelsizeEdit.setRange(1, 999)
self.pixelsizeEdit.setValue(130)
model_groupbox = QtWidgets.QGroupBox("Model")
modelgrid = QtWidgets.QGridLayout(model_groupbox)
modelgrid.addWidget(self.modelchk, 0, 0)
modelgrid.addWidget(QtWidgets.QLabel("X-Coordinates"), 1, 0)
modelgrid.addWidget(self.model_x, 1, 1)
modelgrid.addWidget(QtWidgets.QLabel("Y-Coordinates"), 2, 0)
modelgrid.addWidget(self.model_y, 2, 1)
modelgrid.addWidget(QtWidgets.QLabel("Z-Coordinates"), 3, 0)
modelgrid.addWidget(self.model_z, 3, 1)
modelgrid.addWidget(QtWidgets.QLabel("Blur:"), 4, 0)
modelgrid.addWidget(self.modelblurEdit, 4, 1)
modelgrid.addWidget(QtWidgets.QLabel("Pixelsize:"), 5, 0)
modelgrid.addWidget(self.pixelsizeEdit, 5, 1)
modelgrid.addWidget(self.model_preview_btn, 6, 0)
modelgrid.addWidget(self.modelchk, 6, 1)
buttongrid.addWidget(model_groupbox)
mainWidget = QtWidgets.QWidget()
mainWidget.setLayout(self.grid)
self.setCentralWidget(mainWidget)
self.status_bar.showMessage("Average3 ready.")
def open(self):
path, exe = QtWidgets.QFileDialog.getOpenFileName(
self, "Open localizations", filter="*.hdf5"
)
if path:
self.add(path)
def save(self, path):
n_channels = len(self.locs)
for i in range(n_channels):
cx = self.infos[i][0]["Width"] / 2
cy = self.infos[i][0]["Height"] / 2
out_locs = self.locs[i].copy()
out_locs.x += cx
out_locs.y += cy
info = self.infos[i] + [{"Generated by": "Picasso Average3"}]
if not self.z_state[i]:
out_locs = lib.remove_from_rec(out_locs, "z")
out_path = os.path.splitext(self.locs_paths[i])[0] + "_avg3.hdf5"
path, exe = QtWidgets.QFileDialog.getSaveFileName(
self, "Save localizations", out_path, filter="*.hdf5"
)
io.save_locs(path, out_locs, info)
def dragEnterEvent(self, event):
if event.mimeData().hasUrls():
event.accept()
else:
event.ignore()
def dropEvent(self, event):
urls = event.mimeData().urls()
path = urls[0].toLocalFile()
ext = os.path.splitext(path)[1].lower()
if ext == ".hdf5":
print("Opening {} ..".format(path))
self.add(path)
def add(self, path, rendermode=True):
try:
locs, info = io.load_locs(path, qt_parent=self)
except io.NoMetadataFileError:
return
if len(self.locs) == 0:
self.pixelsize = 0
if not hasattr(locs, "group"):
msgBox = QtWidgets.QMessageBox(self)
msgBox.setWindowTitle("Error")
msgBox.setText(
("Datafile does not contain group information."
" Please load file with picked localizations.")
)
msgBox.exec_()
else:
locs = lib.ensure_sanity(locs, info)
if not hasattr(locs, "z"):
locs = lib.append_to_rec(locs, locs.x.copy(), "z")
self.pixelsize = 1
has_z = False
else:
has_z = True
if self.pixelsize == 0:
pixelsize, ok = QtWidgets.QInputDialog.getInt(
self,
"Pixelsize Dialog",
"Please enter the pixelsize in nm",
130,
)
if ok:
self.pixelsize = pixelsize
else:
self.pixelsize = 130
self.locs.append(locs)
self.z_state.append(has_z)
self.infos.append(info)
self.locs_paths.append(path)
self.index_blocks.append(None)
self._drift.append(None)
self.dataset_dialog.add_entry(path)
self.dataset_dialog.checks[-1].stateChanged.connect(
self.updateLayout
)
cx = self.infos[-1][0]["Width"] / 2
cy = self.infos[-1][0]["Height"] / 2
self.locs[-1].x -= cx
self.locs[-1].y -= cy
if len(self.locs) == 1:
self.median_lp = np.mean(
[np.median(locs.lpx), np.median(locs.lpy)]
)
if hasattr(locs, "group"):
groups = np.unique(locs.group)
groupcopy = locs.group.copy()
for i in range(len(groups)):
groupcopy[locs.group == groups[i]] = i
np.random.shuffle(groups)
groups %= N_GROUP_COLORS
self.group_color = groups[groupcopy]
                if rendermode:
self.fit_in_view(autoscale=True)
else:
                if rendermode:
self.update_scene()
self.oversampling = 1
if len(self.locs) == 1:
self.t_min = np.min([np.min(locs.x), np.min(locs.y)])
self.t_max = np.max([np.max(locs.x), np.max(locs.y)])
self.z_min = np.min(locs.z)
self.z_max = np.max(locs.z)
else:
self.t_min = np.min(
[np.min(locs.x), np.min(locs.y), self.t_min]
)
self.t_max = np.max(
[np.max(locs.x), np.max(locs.y), self.t_max]
)
self.z_min = np.min([np.min(locs.z), self.z_min])
                self.z_max = np.max([np.max(locs.z), self.z_max])
if len(self.locs) == 1:
print("Dataset loaded from {}.".format(path))
else:
print(
("Dataset loaded from {},"
" Total number of datasets {}.").format(
path, len(self.locs)
)
)
# CREATE GROUP INDEX
if hasattr(locs, "group"):
groups = np.unique(locs.group)
n_groups = len(groups)
n_locs = len(locs)
group_index = scipy.sparse.lil_matrix(
                (n_groups, n_locs), dtype=bool
)
progress = lib.ProgressDialog(
"Creating group index", 0, len(groups), self
)
progress.set_value(0)
for i, group in enumerate(groups):
index = np.where(locs.group == group)[0]
group_index[i, index] = True
progress.set_value(i + 1)
self.group_index.append(group_index)
self.n_groups = n_groups
os.chdir(os.path.dirname(path))
self.calculate_radii()
self.oversampling = 4
self.updateLayout()
def updateLayout(self):
if len(self.locs) > 0:
pixmap1, pixmap2, pixmap3 = self.hist_multi_channel(self.locs)
self.viewxy.setPixmap(pixmap1)
self.viewxz.setPixmap(pixmap2)
self.viewyz.setPixmap(pixmap3)
def centerofmass_all(self):
# Align all by center of mass
n_channels = len(self.locs)
out_locs_x = []
out_locs_y = []
out_locs_z = []
for j in range(n_channels):
sel_locs_x = []
sel_locs_y = []
sel_locs_z = []
# stack arrays
sel_locs_x = self.locs[j].x
sel_locs_y = self.locs[j].y
sel_locs_z = self.locs[j].z
out_locs_x.append(sel_locs_x)
out_locs_y.append(sel_locs_y)
out_locs_z.append(sel_locs_z)
out_locs_x = stack_arrays(out_locs_x, asrecarray=True, usemask=False)
out_locs_y = stack_arrays(out_locs_y, asrecarray=True, usemask=False)
out_locs_z = stack_arrays(out_locs_z, asrecarray=True, usemask=False)
mean_x = np.mean(out_locs_x)
mean_y = np.mean(out_locs_y)
mean_z = np.mean(out_locs_z)
for j in range(n_channels):
self.locs[j].x -= mean_x
self.locs[j].y -= mean_y
self.locs[j].z -= mean_z
def calculate_radii(self):
# CALCULATE PROPER R VALUES
n_channels = len(self.locs)
self.r = 0
self.r_z = 0
for j in range(n_channels):
self.r = np.max(
[
3
* np.sqrt(
np.mean(self.locs[j].x ** 2 + self.locs[j].y ** 2)
),
self.r,
]
)
self.r_z = np.max(
[5 * np.sqrt(np.mean(self.locs[j].z ** 2)), self.r_z]
)
self.t_min = -self.r
self.t_max = self.r
self.z_min = -self.r_z
self.z_max = self.r_z
self.z_min_load = self.z_min.copy()
self.z_max_load = self.z_max.copy()
def centerofmass(self):
print("Aligning by center of mass.. ", end="", flush=True)
n_groups = self.n_groups
n_channels = len(self.locs)
progress = lib.ProgressDialog(
"Aligning by center of mass", 0, n_groups, self
)
progress.set_value(0)
for i in range(n_groups):
out_locs_x = []
out_locs_y = []
out_locs_z = []
for j in range(n_channels):
sel_locs_x = []
sel_locs_y = []
sel_locs_z = []
index = self.group_index[j][i, :].nonzero()[1]
# stack arrays
sel_locs_x = self.locs[j].x[index]
sel_locs_y = self.locs[j].y[index]
sel_locs_z = self.locs[j].z[index]
out_locs_x.append(sel_locs_x)
out_locs_y.append(sel_locs_y)
out_locs_z.append(sel_locs_z)
progress.set_value(i + 1)
out_locs_x = stack_arrays(
out_locs_x, asrecarray=True, usemask=False
)
out_locs_y = stack_arrays(
out_locs_y, asrecarray=True, usemask=False
)
out_locs_z = stack_arrays(
out_locs_z, asrecarray=True, usemask=False
)
mean_x = np.mean(out_locs_x)
mean_y = np.mean(out_locs_y)
mean_z = np.mean(out_locs_z)
for j in range(n_channels):
index = self.group_index[j][i, :].nonzero()[1]
self.locs[j].x[index] -= mean_x
self.locs[j].y[index] -= mean_y
self.locs[j].z[index] -= mean_z
self.calculate_radii()
self.updateLayout()
print("Complete.")
def histtoImage(self, image):
cmap = np.uint8(np.round(255 * plt.get_cmap("magma")(np.arange(256))))
image /= image.max()
image = np.minimum(image, 1.0)
image = np.round(255 * image).astype("uint8")
Y, X = image.shape
self._bgra = np.zeros((Y, X, 4), dtype=np.uint8, order="C")
self._bgra[..., 0] = cmap[:, 2][image]
self._bgra[..., 1] = cmap[:, 1][image]
self._bgra[..., 2] = cmap[:, 0][image]
qimage = QtGui.QImage(self._bgra.data, X, Y, QtGui.QImage.Format_RGB32)
qimage = qimage.scaled(
self.viewxy.width(),
            int(np.round(self.viewxy.height() * Y / X)),
QtCore.Qt.KeepAspectRatioByExpanding,
)
pixmap = QtGui.QPixmap.fromImage(qimage)
return pixmap
def hist_multi_channel(self, locs):
oversampling = self.parameters_dialog.oversampling.value()
self.oversampling = oversampling
if locs is None:
locs = self.locs
n_channels = len(locs)
hues = np.arange(0, 1, 1 / n_channels)
colors = [colorsys.hsv_to_rgb(_, 1, 1) for _ in hues]
renderings = []
for i in range(n_channels):
if self.dataset_dialog.checks[i].isChecked():
renderings.append(
render.render_hist3d(
locs[i],
oversampling,
self.t_min,
self.t_min,
self.t_max,
self.t_max,
self.z_min,
self.z_max,
self.pixelsize,
)
)
images = np.array([_[1] for _ in renderings])
pixmap1 = self.pixmap_from_colors(images, colors, 2)
pixmap2 = self.pixmap_from_colors(images, colors, 0)
pixmap3 = self.pixmap_from_colors(images, colors, 1)
return pixmap1, pixmap2, pixmap3
def pixmap_from_colors(self, images, colors, axisval):
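        # axisval selects which axis of the 3D histograms is summed over: 2 collapses z
        # for the XY view, while 0 and 1 collapse the remaining axes (and transpose) to
        # produce the side-view projections.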
if axisval == 2:
image = [np.sum(_, axis=axisval) for _ in images]
else:
image = [np.transpose(np.sum(_, axis=axisval)) for _ in images]
image = np.array([self.scale_contrast(_) for _ in image])
Y, X = image.shape[1:]
bgra = np.zeros((Y, X, 4), dtype=np.float32)
        for color, img in zip(colors, image):  # avoid shadowing the projection list
            bgra[:, :, 0] += color[2] * img
            bgra[:, :, 1] += color[1] * img
            bgra[:, :, 2] += color[0] * img
bgra = np.minimum(bgra, 1)
self._bgra = self.to_8bit(bgra)
qimage = QtGui.QImage(self._bgra.data, X, Y, QtGui.QImage.Format_RGB32)
qimage = qimage.scaled(
self.viewxy.width(),
            int(np.round(self.viewxy.height() * Y / X)),
QtCore.Qt.KeepAspectRatioByExpanding,
)
pixmap = QtGui.QPixmap.fromImage(qimage)
return pixmap
def align_x(self):
print("Align X")
self.align_all("x")
def align_y(self):
print("Align Y")
self.align_all("y")
def align_zz(self):
print("Align Z")
self.align_all("zz")
def align_zy(self):
print("Align Z")
self.align_all("zy")
def translate_x(self):
print("Translate X")
self.translate("x")
def translate_y(self):
print("Translate Y")
self.translate("y")
def translate_z(self):
print("Translate Z")
self.translate("z")
def translate(self, translateaxis):
renderings = [
render.render_hist3d(
_,
self.oversampling,
self.t_min,
self.t_min,
self.t_max,
self.t_max,
self.z_min,
self.z_max,
self.pixelsize,
)
for _ in self.locs
]
images = np.array([_[1] for _ in renderings])
if translateaxis == "x":
image = [np.sum(_, axis=2) for _ in images]
signalimg = [np.sum(_, axis=0) for _ in image]
elif translateaxis == "y":
image = [np.sum(_, axis=2) for _ in images]
signalimg = [np.sum(_, axis=1) for _ in image]
elif translateaxis == "z":
image = [np.sum(_, axis=1) for _ in images]
signalimg = [np.sum(_, axis=0) for _ in image]
fig = plt.figure(figsize=(5, 5))
ax1 = fig.add_subplot(1, 1, 1)
for element in signalimg:
plt.plot(element)
n_groups = self.group_index[0].shape[0]
print("Translating..")
for i in tqdm(range(n_groups)):
self.status_bar.showMessage("Group {} / {}.".format(i, n_groups))
self.translate_group(signalimg, i, translateaxis)
fig.canvas.draw()
size = fig.canvas.size()
width, height = size.width(), size.height()
im = QtGui.QImage(
fig.canvas.buffer_rgba(), width, height, QtGui.QImage.Format_ARGB32
)
self.viewcp.setPixmap((QtGui.QPixmap(im)))
self.viewcp.setAlignment(QtCore.Qt.AlignCenter)
plt.close(fig)
self.centerofmass_all()
self.updateLayout()
self.status_bar.showMessage("Done!")
def translate_group(self, signalimg, group, translateaxis):
n_channels = len(self.locs)
all_xcorr = np.zeros((1, n_channels))
all_da = np.zeros((1, n_channels))
if translateaxis == "x":
proplane = "xy"
elif translateaxis == "y":
proplane = "xy"
elif translateaxis == "z":
proplane = "xz"
plotmode = 0
for j in range(n_channels):
if plotmode:
fig = plt.figure()
ax1 = fig.add_subplot(1, 3, 1)
plt.plot(signalimg[j])
ax2 = fig.add_subplot(1, 3, 2)
if self.dataset_dialog.checks[j].isChecked():
index = self.group_index[j][group].nonzero()[1]
x_rot = self.locs[j].x[index]
y_rot = self.locs[j].y[index]
z_rot = self.locs[j].z[index]
plane = self.render_planes(
x_rot, y_rot, z_rot, proplane, self.pixelsize
) #
if translateaxis == "x":
projection = np.sum(plane, axis=0)
elif translateaxis == "y":
projection = np.sum(plane, axis=1)
elif translateaxis == "z":
projection = np.sum(plane, axis=1)
if plotmode:
plt.plot(projection)
# print('Step X')
# ax3 = fig.add_subplot(1,3,3)
# plt.imshow(plane, interpolation='nearest', cmap=plt.cm.ocean)
corrval = np.max(signal.correlate(signalimg[j], projection))
shiftval = (
np.argmax(signal.correlate(signalimg[j], projection))
- len(signalimg[j])
+ 1
)
all_xcorr[0, j] = corrval
all_da[0, j] = shiftval / self.oversampling
if plotmode:
plt.show()
# value with biggest cc value form table
maximumcc = np.argmax(np.sum(all_xcorr, axis=1))
dafinal = np.mean(all_da[maximumcc, :])
for j in range(n_channels):
index = self.group_index[j][group].nonzero()[1]
if translateaxis == "x":
self.locs[j].x[index] += dafinal
elif translateaxis == "y":
self.locs[j].y[index] += dafinal
elif translateaxis == "z":
self.locs[j].z[index] += dafinal * self.pixelsize
def adjust_z(self):
z_range_str = np.asarray((self.z_range.text()).split(","))
z_range = []
for element in z_range_str:
try:
z_range.append(float(element))
except ValueError:
pass
z_min = z_range[0]
z_max = z_range[1]
self.z_min = np.max([z_min, self.z_min_load])
self.z_max = np.min([z_max, self.z_max_load])
print("Z min {}, Z max {}".format(self.z_min, self.z_max))
self.updateLayout()
def adjust_xy(self):
x_range_str = np.asarray((self.x_range.text()).split(","))
x_range = []
for element in x_range_str:
try:
x_range.append(float(element))
except ValueError:
pass
x_min = x_range[0]
x_max = x_range[1]
self.x_min = np.max([x_min, self.t_min])
self.x_max = np.min([x_max, self.t_max])
print("X min {}, X max {}".format(self.x_min, self.x_max))
y_range_str = np.asarray((self.y_range.text()).split(","))
y_range = []
for element in y_range_str:
try:
y_range.append(float(element))
except ValueError:
pass
y_min = y_range[0]
y_max = y_range[1]
self.y_min = np.max([y_min, self.t_min])
self.y_max = np.min([y_max, self.t_max])
print("Y min {}, Y max {}".format(self.y_min, self.y_max))
self.updateLayout()
def rotatexy_convolution_group(
self, CF_image_avg, angles, group, rotaxis, proplane
):
n_channels = len(self.locs)
n_angles = len(angles)
all_xcorr = np.zeros((n_angles, n_channels))
all_da = np.zeros((n_angles, n_channels))
all_db = np.zeros((n_angles, n_channels))
for j in range(n_channels):
if self.dataset_dialog.checks[j].isChecked():
index = self.group_index[j][group].nonzero()[1]
x_rot = self.locs[j].x[index]
y_rot = self.locs[j].y[index]
z_rot = self.locs[j].z[index]
x_original = x_rot.copy()
y_original = y_rot.copy()
z_original = z_rot.copy()
if self.translatebtn.isChecked():
angles = [0]
n_angles = 1
for k in range(n_angles):
angle = angles[k]
# rotate locs
x_rot, y_rot, z_rot = rotate_axis(
rotaxis,
x_original,
y_original,
z_original,
angle,
self.pixelsize,
)
# render group image for plane
image = self.render_planes(
x_rot, y_rot, z_rot, proplane, self.pixelsize
)
# calculate cross-correlation
if 0:
fig = plt.figure()
ax1 = fig.add_subplot(1, 2, 1)
ax1.set_aspect("equal")
plt.imshow(
image, interpolation="nearest", cmap=plt.cm.ocean
)
plt.colorbar()
plt.show()
plt.waitforbuttonpress()
xcorr = np.sum(np.multiply(CF_image_avg[j], image))
all_xcorr[k, j] = xcorr
# value with biggest cc value form table
maximumcc = np.argmax(np.sum(all_xcorr, axis=1))
rotfinal = angles[maximumcc]
for j in range(n_channels):
index = self.group_index[j][group].nonzero()[1]
x_rot = self.locs[j].x[index]
y_rot = self.locs[j].y[index]
z_rot = self.locs[j].z[index]
x_original = x_rot.copy()
y_original = y_rot.copy()
z_original = z_rot.copy()
# rotate and shift image group locs
x_rot, y_rot, z_rot = rotate_axis(
rotaxis,
x_original,
y_original,
z_original,
rotfinal,
self.pixelsize,
)
self.locs[j].x[index] = x_rot
self.locs[j].y[index] = y_rot
self.locs[j].z[index] = z_rot
def rotatexy_convolution(self):
# TODO: re-write ths with kwargs at some point
rotaxis = []
if self.x_axisbtn.isChecked():
rotaxis = "x"
elif self.y_axisbtn.isChecked():
rotaxis = "y"
elif self.z_axisbtn.isChecked():
rotaxis = "z"
n_groups = self.group_index[0].shape[0]
a_step = np.arcsin(1 / (self.oversampling * self.r))
if self.full_degbtn.isChecked():
angles = np.arange(0, 2 * np.pi, a_step)
elif self.part_degbtn.isChecked():
degree = self.degEdit.value()
angles = np.arange(
-degree / 360 * 2 * np.pi, degree / 360 * 2 * np.pi, a_step
)
renderings = [
render.render_hist3d(
_,
self.oversampling,
self.t_min,
self.t_min,
self.t_max,
self.t_max,
self.z_min,
self.z_max,
self.pixelsize,
)
for _ in self.locs
]
images = np.array([_[1] for _ in renderings])
# DELIVER CORRECT PROJECTION FOR IMAGE
proplane = []
if self.xy_projbtn.isChecked():
proplane = "xy"
            image = [np.sum(_, axis=2) for _ in images]
# -*- coding: utf-8 -*-
"""
@author: <NAME> <<EMAIL>>, January 2017 / February 2018.
"""
import numpy as np
from scipy.stats import multivariate_normal
import time
from joblib import Parallel, delayed
import sys
from functools import reduce
from scipy.stats import triang
import torch
from scipy.signal import medfilt
def lhs(minn,maxn,N): # Latin Hypercube sampling
# Here minn and maxn are assumed to be 1xd arrays
x = np.zeros((N,minn.shape[1]))
for j in range (0,minn.shape[1]):
idx = np.random.permutation(N)+0.5
P =(idx - x[:,j])/N
x[:,j] = minn[0,j] + P*(maxn[0,j] - minn[0,j])
return x
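# Usage sketch (hypothetical bounds): 5 Latin Hypercube samples in [0, 1] x [-2, 2]
#   X = lhs(np.array([[0.0, -2.0]]), np.array([[1.0, 2.0]]), 5)
# Each column uses a random permutation of the N strata, so every sample falls in a
# distinct 1/N slice of each dimension.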
def GenCR(MCMCPar,pCR):
if type(pCR) is np.ndarray:
p=np.ndarray.tolist(pCR)[0]
else:
p=pCR
    CR = np.zeros((MCMCPar.seq * MCMCPar.steps), dtype=float)
    L = np.random.multinomial(MCMCPar.seq * MCMCPar.steps, p, size=1)
    L2 = np.concatenate((np.zeros((1), dtype=int), np.cumsum(L)), axis=0)
r = np.random.permutation(MCMCPar.seq * MCMCPar.steps)
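    # Distribute the nCR crossover levels over all chain/step slots: L[zz] slots receive
    # the value (zz+1)/nCR, placed at randomly permuted positions so the levels are
    # spread uniformly over chains and generations.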
for zz in range(0,MCMCPar.nCR):
i_start = L2[zz]
i_end = L2[zz+1]
idx = r[i_start:i_end]
        CR[idx] = float(zz + 1) / MCMCPar.nCR
CR = np.reshape(CR,(MCMCPar.seq,MCMCPar.steps))
return CR, L
def CalcDelta(nCR,delta_tot,delta_normX,CR):
# Calculate total normalized Euclidean distance for each crossover value
# Derive sum_p2 for each different CR value
for zz in range(0,nCR):
# Find which chains are updated with zz/MCMCPar.nCR
idx = np.argwhere(CR==(1.0+zz)/nCR);idx=idx[:,0]
# Add the normalized squared distance tot the current delta_tot;
delta_tot[0,zz] = delta_tot[0,zz] + np.sum(delta_normX[idx])
return delta_tot
def AdaptpCR(seq,delta_tot,lCR,pCR_old):
if np.sum(delta_tot) > 0:
pCR = seq * (delta_tot/lCR) / np.sum(delta_tot)
pCR = pCR/np.sum(pCR)
else:
pCR=pCR_old
return pCR
def CompLikelihood(X,fx,MCMCPar,Measurement,Extra):
if MCMCPar.lik==0: # fx contains log-density
of = np.exp(fx)
log_p= fx
elif MCMCPar.lik==1: # fx contains density
of = fx
        log_p = np.log(of)
from __future__ import division
import numpy as np
def linear_merge(x1, y1, x2, y2):
"""
Merge data pairs (x1, y1) and (x2, y2) over the intersection
of their ranges (x) using a linear interpolation of each
dataset to interpolate between unaligned values.
No extrapolation is performed.
Input
=====
:x1, array-like: abscissa coordinates of dataset 1
:y1, array-like: ordinate coordinates of dataset 1
:x2, array-like: abscissa coordinates of dataset 2
:y2, array-like: ordinate coordinates of dataset 2
OUT
===
Tuple of the merged x (`xm`) and interpolated y1 (`y1m`) and y2 (`y2m`):
`(xm, y1m, y2m)`
"""
# ensure all values are ndarrays
x1 = np.asarray(x1)
x2 = np.asarray(x2)
y1 = np.asarray(y1)
y2 = np.asarray(y2)
# ##########
# merge on x
xmerge = np.concatenate((np.sort(x1), np.sort(x2)))
xmerge.sort(kind='mergesort')
# perform interpolation
xlo = np.max((x1.min(), x2.min()))
xhi = np.min((x1.max(), x2.max()))
# keep only the merged x values within the intersection of
# the data ranges
mask = (xmerge >= xlo) & (xmerge <= xhi)
xf = xmerge[mask]
y1f = np.interp(xf, x1, y1)
    y2f = np.interp(xf, x2, y2)
"""Fit the averaged delta sigma profiles.
"""
from catalog import *
import numpy as np
import cluster_toolkit as ct
import scipy.optimize as op
import matplotlib.pyplot as plt
def get_model(M, args):
Redges = args['Redges']
Rlam = args['Rlam']
h = args['h']
Om = args['Om']
z = args['z']
r = args['r3d'] #Mpc/h comoving
Rperp = args['Rperp'] #Mpc/h comoving
SCI = args['SCI']
k = args['k']
Plin = args['Plin']
xi_mm = args['xi_mm']
c,tau,fmis,Am,B0,Rs = args['params']
#c = ct.concentration.concentration_at_M(M, k, Plin, ns, Ob, Om, h, Mass_type='mean')
xi_nfw = ct.xi.xi_nfw_at_R(r, M, c, Om)
bias = ct.bias.bias_at_M(M,k,Plin,Om)
xi_2halo = ct.xi.xi_2halo(bias, xi_mm)
xi_hm = ct.xi.xi_hm(xi_nfw, xi_2halo)
#print("%3e"%M)
#print(bias)
#rdata = np.loadtxt("testdata/r.txt")
#xid = np.load("testdata/hmcfs_z006_0.05sigintr.npy")
#print(xid.shape)
##xid = xid[1]
#plt.loglog(r, xi_hm)
#plt.loglog(r, xi_mm, ls=':')
#plt.loglog(rdata, xid)
#plt.show()
#exit()
Rmis = tau*Rlam #Mpc/h comoving
#Sigmas are in Msun h/pc^2 comoving
Sigma = ct.deltasigma.Sigma_at_R(Rperp, r, xi_hm, M, c, Om)
Sigma_mis = ct.miscentering.Sigma_mis_at_R(Rperp, Rperp, Sigma, M, c, Om, Rmis, kernel="exponential")
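    # Mix the correctly-centred and miscentred surface-density profiles with the
    # miscentring fraction fmis before converting to convergence (kappa) below.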
full_Sigma = (1-fmis)*Sigma + fmis*Sigma_mis
kappa = SCI*full_Sigma*h*(1+z)**2
#DeltaSigmas are in Msun/pc^2 physical
DeltaSigma = ct.deltasigma.DeltaSigma_at_R(Rperp, Rperp, Sigma, M, c, Om) *h*(1+z)**2
DeltaSigma_mis = ct.miscentering.DeltaSigma_mis_at_R(Rperp, Rperp, Sigma_mis) *h*(1+z)**2
full_DS = (1-fmis)*DeltaSigma + fmis*DeltaSigma_mis
#Apply corrections
B = args['boost']
full_DS *= Am/(B*(1-kappa))
ave_fDS = ct.averaging.average_profile_in_bins(Redges, Rperp/(h*(1+z)), full_DS)
return ave_fDS
def lnlike(pars, args):
Mtrue = args['Mass']
Cal = pars
M = Mtrue/Cal
DSmodel = get_model(M, args)[args['inds']]
#Get the data
DSd = args['DSd']
icov = args['icov']
X = DSd - DSmodel
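    # Gaussian log-likelihood up to a constant: -0.5 * X^T C^-1 X, using the
    # precomputed inverse covariance of the binned DeltaSigma data.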
chi2 = -0.5*np.dot(X,np.dot(icov,X))
return chi2
if __name__ == "__main__":
#Load in the halo catalog
sigs = np.arange(0.05, 0.45, step=0.05)
inds = [6,7,8,9]
bins = np.array([20,30,45,60,999])
zs = [1.0, 0.5, 0.25, 0.0]
zmap = [2,1,0,0] #Map from fox zi to data zi, for SAC matrices
    covpath = "/Users/tom/Data/DESY1/RMWL/SACs/SAC_z%d_l%d.txt"
datapath = "ds_testdata/DSave_z%03d_%.2fsigintr.npy"
halopath = "/Users/tom/Data/DESY1/RMWL/fox_files/halo_catalogs/reduced_halos_lamobs_%.2fsigintr_%03d.npy"
#Output path
outpath = "calibration_fits/result_%.2fsigintr.npy"
for sig in sigs:
outarray = np.zeros((6, 16)) #6 columns, 16 rows for each z-Lambda bin in the sim
#zindex, lindex, Mtrue, lambda, cal, calunc
for i,ind in enumerate(inds):
print(i,ind)
outarray[0, i*4:(i+1)*4] = ind #Z index
outarray[1, i*4:(i+1)*4] = np.arange(4)+3 #l index
zid = zmap[i] #Z index for data
z = zs[i]
            deltap1s = np.loadtxt("Y1_deltap1.txt")
from operator import add
import random
from attacks import utils
import torch.nn.functional as F
import numpy as np
import torch
import scipy.sparse as sp
from attacks.attack import gcn_norm
def dice_injection(adj, n_inject, n_edge_max, origin_labels, target_idx, device):
n_classes = max(origin_labels)+1
class_pos = [[] for i in range(n_classes)]
    for i in range(len(origin_labels)):
class_id = origin_labels[i]
class_pos[class_id].append(i)
direct_edges = n_edge_max//2 # number of edges connect to target nodes
bridge_edges = n_edge_max-direct_edges # number of edges connect to different classes
n_node = adj.size(0)
adj=utils.tensor_to_adj(adj)
target_idx = target_idx.cpu()
n_test = target_idx.shape[0]
new_edges_x = []
new_edges_y = []
new_data = []
# connect injected nodes to target nodes
for i in range(n_inject):
islinked = np.zeros(n_test)
for j in range(direct_edges):
x = i + n_node
yy = random.randint(0, n_test - 1)
while islinked[yy] > 0:
yy = random.randint(0, n_test - 1)
islinked[yy] = 1
y = target_idx[yy]
new_edges_x.extend([x, y])
new_edges_y.extend([y, x])
new_data.extend([1, 1])
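    # Pad the adjacency matrix with empty rows/columns for the injected nodes, then
    # append the new symmetric edges in COO format before converting back to a tensor.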
add1 = sp.csr_matrix((n_inject, n_node))
add2 = sp.csr_matrix((n_node + n_inject, n_inject))
adj_attack = sp.vstack([adj, add1])
adj_attack = sp.hstack([adj_attack, add2])
adj_attack.row = np.hstack([adj_attack.row, new_edges_x])
adj_attack.col = np.hstack([adj_attack.col, new_edges_y])
adj_attack.data = np.hstack([adj_attack.data, new_data])
adj_attack = utils.adj_to_tensor(adj_attack).to(device)
return adj_attack
def random_class_injection(adj, n_inject, n_edge_max, origin_labels, target_idx, device, not_full=False):
n_classes = max(origin_labels)+1
class_pos = [[] for i in range(n_classes)]
min_class_len = len(target_idx)
for (i,pos) in enumerate(target_idx):
class_id = origin_labels[pos]
class_pos[class_id].append(i)
    for c in class_pos:
        min_class_len = min(min_class_len, len(c))
if not not_full:
assert min_class_len >= n_edge_max, print(f"min_class_len {min_class_len}")
n_node = adj.size(0)
adj=utils.tensor_to_adj(adj)
target_idx = target_idx.cpu()
n_test = target_idx.shape[0]
new_edges_x = []
new_edges_y = []
new_data = []
for i in range(n_inject):
islinked = np.zeros(n_test)
class_id = random.randint(0, n_classes-1)
n_connections = min(len(class_pos[class_id]),n_edge_max)
for j in range(n_connections):
x = i + n_node
yy = random.randint(0, len(class_pos[class_id]) - 1)
while islinked[class_pos[class_id][yy]] > 0:
yy = random.randint(0, len(class_pos[class_id]) - 1)
islinked[class_pos[class_id][yy]] = 1
y = target_idx[class_pos[class_id][yy]]
new_edges_x.extend([x, y])
new_edges_y.extend([y, x])
new_data.extend([1, 1])
add1 = sp.csr_matrix((n_inject, n_node))
add2 = sp.csr_matrix((n_node + n_inject, n_inject))
adj_attack = sp.vstack([adj, add1])
adj_attack = sp.hstack([adj_attack, add2])
adj_attack.row = np.hstack([adj_attack.row, new_edges_x])
adj_attack.col = np.hstack([adj_attack.col, new_edges_y])
adj_attack.data = np.hstack([adj_attack.data, new_data])
adj_attack = utils.adj_to_tensor(adj_attack).to(device)
return adj_attack
def random_injection(adj, n_inject, n_edge_max, target_idx, device):
n_node = adj.size(0)
adj=utils.tensor_to_adj(adj)
target_idx = target_idx.cpu()
n_test = target_idx.shape[0]
new_edges_x = []
new_edges_y = []
new_data = []
for i in range(n_inject):
islinked = np.zeros(n_test)
for j in range(n_edge_max):
x = i + n_node
yy = random.randint(0, n_test - 1)
while islinked[yy] > 0:
yy = random.randint(0, n_test - 1)
            # mark the chosen target as used so this injected node never connects
            # to the same target twice (prevents duplicate edges)
            islinked[yy] = 1
y = target_idx[yy]
new_edges_x.extend([x, y])
new_edges_y.extend([y, x])
new_data.extend([1, 1])
add1 = sp.csr_matrix((n_inject, n_node))
add2 = sp.csr_matrix((n_node + n_inject, n_inject))
adj_attack = sp.vstack([adj, add1])
adj_attack = sp.hstack([adj_attack, add2])
adj_attack.row = np.hstack([adj_attack.row, new_edges_x])
adj_attack.col = np.hstack([adj_attack.col, new_edges_y])
adj_attack.data = np.hstack([adj_attack.data, new_data])
adj_attack = utils.adj_to_tensor(adj_attack).to(device)
return adj_attack
def tdgia_injection(adj, n_inject, n_edge_max, origin_labels, current_pred,
target_idx, device, self_connect_ratio=0.0, weight1=0.9, weight2=0.1):
n_current = adj.size(0)
adj=utils.tensor_to_adj(adj)
target_idx = target_idx.cpu()
n_test = target_idx.size(0)
n_classes = origin_labels.max() + 1
n_connect = int(n_edge_max * (1 - self_connect_ratio))
n_self_connect = int(n_edge_max * self_connect_ratio)
new_edges_x = []
new_edges_y = []
new_data = []
add_score = np.zeros(n_test)
deg = np.array(adj.sum(axis=0))[0] + 1.0
for i in range(n_test):
it = target_idx[i]
label = origin_labels[it]
score = current_pred[it][label] + 2
add_score1 = score / deg[it]
add_score2 = score / np.sqrt(deg[it])
sc = weight1 * add_score1 + weight2 * add_score2 / np.sqrt(n_connect + n_self_connect)
add_score[i] = sc
# higher score is better
sorted_rank = add_score.argsort()
sorted_rank = sorted_rank[-n_inject * n_connect:]
labelgroup = np.zeros(n_classes)
# separate them by origin_labels
labelil = []
for i in range(n_classes):
labelil.append([])
random.shuffle(sorted_rank)
for i in sorted_rank:
label = origin_labels[target_idx[i]]
labelgroup[label] += 1
labelil[label].append(i)
pos = np.zeros(n_classes)
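    # Assign injected edges round-robin over the label groups: always pick the class
    # whose pool of high-score targets has been used proportionally least, so the
    # injected connections stay balanced across predicted classes.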
for i in range(n_inject):
for j in range(n_connect):
smallest = 1
small_id = 0
for k in range(n_classes):
if len(labelil[k]) > 0:
if (pos[k] / len(labelil[k])) < smallest:
smallest = pos[k] / len(labelil[k])
small_id = k
# print((k,smallest))
# if smallest == 1:
# for k in range(n_classes):
# print((pos[k],len(labelil[k])))
# print((len(target_idx),n_inject, n_edge_max))
tu = labelil[small_id][int(pos[small_id])]
pos[small_id] += 1
x = n_current + i
y = target_idx[tu]
new_edges_x.extend([x, y])
new_edges_y.extend([y, x])
new_data.extend([1, 1])
is_linked = np.zeros((n_inject, n_inject))
for i in range(n_inject):
rnd_times = 100
while np.sum(is_linked[i]) < n_self_connect and rnd_times > 0:
x = i + n_current
rnd_times = 100
yy = random.randint(0, n_inject - 1)
while (np.sum(is_linked[yy]) >= n_self_connect or yy == i or
is_linked[i][yy] == 1) and (rnd_times > 0):
yy = random.randint(0, n_inject - 1)
rnd_times -= 1
if rnd_times > 0:
y = n_current + yy
is_linked[i][yy] = 1
is_linked[yy][i] = 1
new_edges_x.extend([x, y])
new_edges_y.extend([y, x])
new_data.extend([1, 1])
add1 = sp.csr_matrix((n_inject, n_current))
add2 = sp.csr_matrix((n_current + n_inject, n_inject))
adj_attack = sp.vstack([adj, add1])
adj_attack = sp.hstack([adj_attack, add2])
adj_attack.row = np.hstack([adj_attack.row, new_edges_x])
adj_attack.col = np.hstack([adj_attack.col, new_edges_y])
adj_attack.data = np.hstack([adj_attack.data, new_data])
adj_attack = utils.adj_to_tensor(adj_attack).to(device)
return adj_attack
def atdgia_injection(adj, n_inject, n_edge_max, origin_labels, current_pred,
target_idx, device, self_connect_ratio=0.0, weight1=0.9, weight2=0.1):
n_current = adj.size(0)
adj=utils.tensor_to_adj(adj)
target_idx = target_idx.cpu()
n_test = target_idx.size(0)
n_classes = origin_labels.max() + 1
n_connect = int(n_edge_max * (1 - self_connect_ratio))
n_self_connect = int(n_edge_max * self_connect_ratio)
new_edges_x = []
new_edges_y = []
new_data = []
add_score = np.zeros(n_test)
deg = np.array(adj.sum(axis=0))[0] + 1.0
for i in range(n_test):
it = target_idx[i]
label = origin_labels[it]
cur_label = current_pred[it].argmax()
if cur_label==label:
score = 1.0 - current_pred[it][label]
else:
score = 0
# score = current_pred[it][label] + 2
add_score1 = score / deg[it]
        add_score2 = score / np.sqrt(deg[it])
__copyright__ = "Copyright 2016-2019, Netflix, Inc."
__license__ = "Apache, Version 2.0"
import unittest
import numpy as np
from vmaf.config import VmafConfig
from vmaf.tools.reader import YuvReader
class YuvReaderTest(unittest.TestCase):
def test_yuv_reader(self):
yuv_reader = YuvReader(
filepath=VmafConfig.test_resource_path("yuv", "src01_hrc00_576x324.yuv"),
width=576,
height=324,
yuv_type='yuv420p'
)
self.assertEquals(yuv_reader.num_bytes, 13436928)
self.assertEquals(yuv_reader.num_frms, 48)
self.assertEquals(yuv_reader._get_uv_width_height_multiplier(), (0.5, 0.5))
def test_with(self):
with YuvReader(
filepath=VmafConfig.test_resource_path("yuv", "src01_hrc00_576x324.yuv"),
width=576,
height=324,
yuv_type='yuv420p'
) as yuv_reader:
assert hasattr(yuv_reader.file, "read")
def test_next_y_u_v(self):
with YuvReader(
filepath=VmafConfig.test_resource_path("yuv", "src01_hrc00_576x324.yuv"),
width=576,
height=324,
yuv_type='yuv420p'
) as yuv_reader:
y, u, v = yuv_reader.__next__()
self.assertEquals(y[0][0], 87)
self.assertEquals(y[0][1], 131)
self.assertEquals(y[1][0], 95)
self.assertEquals(u[0][0], 92)
self.assertEquals(u[0][1], 97)
self.assertEquals(u[1][0], 90)
self.assertEquals(v[0][0], 121)
self.assertEquals(v[0][1], 126)
self.assertEquals(v[1][0], 122)
self.assertAlmostEquals(y.mean(), 61.928749785665296, places=4)
self.assertAlmostEquals(u.mean(), 114.6326517489712, places=4)
self.assertAlmostEquals(v.mean(), 122.05084019204389, places=4)
y, u, v = yuv_reader.__next__()
self.assertEquals(y[0][0], 142)
self.assertEquals(y[0][1], 128)
self.assertEquals(y[1][0], 134)
self.assertEquals(u[0][0], 93)
self.assertEquals(u[0][1], 102)
self.assertEquals(u[1][0], 91)
self.assertEquals(v[0][0], 128)
self.assertEquals(v[0][1], 126)
self.assertEquals(v[1][0], 124)
self.assertAlmostEquals(y.mean(), 61.265260631001375, places=4)
self.assertAlmostEquals(u.mean(), 114.72515860768175, places=4)
self.assertAlmostEquals(v.mean(), 122.12022033607681, places=4)
def test_iteration(self):
y_1stmoments = []
y_2ndmoments = []
with YuvReader(
filepath=VmafConfig.test_resource_path("yuv", "src01_hrc01_576x324.yuv"),
width=576, height=324, yuv_type='yuv420p') as yuv_reader:
for y, u, v in yuv_reader:
y_1stmoments.append(y.mean())
y_2ndmoments.append(y.var() + y.mean() * y.mean())
self.assertEquals(len(y_1stmoments), 48)
self.assertEquals(len(y_2ndmoments), 48)
        self.assertAlmostEquals(np.mean(y_1stmoments)
"""
matplotlib helper functions for commong drawing tasks.
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches
import scipy.spatial
import scipy.interpolate
from ..math import eigsorted, nancov
from ..text import int_to_alpha
from ..missing import cooccurence_pattern
from .interpolation import interpolated_patch_path
from .axes import add_colorbar, subaxes
from ..log import Handle
logger = Handle(__name__)
try:
from sklearn.decomposition import PCA
except ImportError:
msg = "scikit-learn not installed"
logger.warning(msg)
def alphalabel_subplots(ax, fmt="{}", xy=(0.03, 0.95), ha="left", va="top", **kwargs):
"""
Add alphabetical labels to a successive series of subplots with a specified format.
Parameters
-----------
ax : :class:`list` | :class:`numpy.ndarray` | :class:`numpy.flatiter`
Axes to label, in desired order.
fmt : :class:`str`
Format string to use. To add e.g. parentheses, you could specify :code:`"({})"`.
xy : :class:`tuple`
Position of the labels in axes coordinates.
ha : :class:`str`
Horizontal alignment of the labels (:code:`{"left", "right"}`).
va : :class:`str`
Vertical alignment of the labels (:code:`{"top", "bottom"}`).
"""
flat = np.array(ax).flatten()
# get axes in case of iterator which is consumed
_ax = [(ix, flat[ix]) for ix in range(len(flat))]
labels = [(a, fmt.format(int_to_alpha(ix))) for ix, a in _ax]
[
a.annotate(label, xy=xy, xycoords=a.transAxes, ha=ha, va=va, **kwargs)
for a, label in labels
]
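# Minimal usage sketch (added for illustration; not part of pyrolite): label a
# 2 x 2 grid of subplots "(a)" through "(d)" in their upper-left corners.
def _example_alphalabel_subplots():
    fig, axes = plt.subplots(2, 2)
    alphalabel_subplots(axes, fmt="({})", xy=(0.03, 0.95))
    return fig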
def get_centroid(poly):
"""
Centroid of a closed polygon using the Shoelace formula.
Parameters
----------
poly : :class:`matplotlib.patches.Polygon`
Polygon to obtain the centroid of.
Returns
-------
cx, cy : :class:`tuple`
Centroid coordinates.
"""
# get signed area
verts = poly.get_xy()
A = 0
cx, cy = 0, 0
x, y = verts.T
for i in range(len(verts) - 1):
A += x[i] * y[i + 1] - x[i + 1] * y[i]
cx += (x[i] + x[i + 1]) * (x[i] * y[i + 1] - x[i + 1] * y[i])
cy += (y[i] + y[i + 1]) * (x[i] * y[i + 1] - x[i + 1] * y[i])
A /= 2
cx /= 6 * A
cy /= 6 * A
return cx, cy
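# Illustrative check (added): for a unit square the Shoelace-based centroid
# should evaluate to (0.5, 0.5).
def _example_get_centroid():
    square = matplotlib.patches.Polygon(
        [(0, 0), (1, 0), (1, 1), (0, 1)], closed=True
    )
    return get_centroid(square)  # approximately (0.5, 0.5)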
def rect_from_centre(x, y, dx=0, dy=0, **kwargs):
"""
Takes an xy point, and creates a rectangular patch centred about it.
"""
# If either x or y is nan
if any([np.isnan(i) for i in [x, y]]):
return None
if np.isnan(dx):
dx = 0
if np.isnan(dy):
dy = 0
llc = (x - dx, y - dy)
return matplotlib.patches.Rectangle(llc, 2 * dx, 2 * dy, **kwargs)
def draw_vector(v0, v1, ax=None, **kwargs):
"""
    Plots an arrow representing the direction and magnitude of a principal
    component on a biaxial plot.
Modified after <NAME>' Python Data Science Handbook
https://jakevdp.github.io/PythonDataScienceHandbook/ \
05.09-principal-component-analysis.html
Todo
-----
Update for ternary plots.
"""
    if ax is None:
        fig, ax = plt.subplots(1)
arrowprops = dict(arrowstyle="->", linewidth=2, shrinkA=0, shrinkB=0)
arrowprops.update(kwargs)
ax.annotate("", v1, v0, arrowprops=arrowprops)
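# Illustrative usage (added): draw an arrow from the origin to (1, 1), e.g. to
# mark a principal component direction on a fresh pair of axes.
def _example_draw_vector():
    fig, ax = plt.subplots(1)
    draw_vector((0, 0), (1, 1), ax=ax)
    return ax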
def vector_to_line(
mu: np.array, vector: np.array, variance: float, spans: int = 4, expand: int = 10
):
"""
Creates an array of points representing a line along a vector - typically
for principal component analysis. Modified after <NAME>' Python Data
Science Handbook https://jakevdp.github.io/PythonDataScienceHandbook/ \
05.09-principal-component-analysis.html
"""
length = np.sqrt(variance)
parts = np.linspace(-spans, spans, expand * spans + 1)
    line = length * parts[:, np.newaxis] * vector[np.newaxis, :] + mu
return line
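# Illustrative usage (added): a line through the origin along the x-axis with
# unit variance; the result is a (spans * expand + 1, 2) array of points.
def _example_vector_to_line():
    return vector_to_line(np.array([0.0, 0.0]), np.array([1.0, 0.0]), variance=1.0)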
def plot_stdev_ellipses(
comp, nstds=4, scale=100, resolution=1000, transform=None, ax=None, **kwargs
):
"""
Plot covariance ellipses at a number of standard deviations from the mean.
Parameters
-------------
comp : :class:`numpy.ndarray`
Composition to use.
nstds : :class:`int`
Number of standard deviations from the mean for which to plot the ellipses.
scale : :class:`float`
        Scale applying to all x-y data points. For integration with python-ternary.
transform : :class:`callable`
Function for transformation of data prior to plotting (to either 2D or 3D).
ax : :class:`matplotlib.axes.Axes`
Axes to plot on.
Returns
-------
ax : :class:`matplotlib.axes.Axes`
"""
mean, cov = np.nanmean(comp, axis=0), nancov(comp)
vals, vecs = eigsorted(cov)
theta = np.degrees(np.arctan2(*vecs[::-1]))
if ax is None:
projection = None
if callable(transform) and (transform is not None):
if transform(comp).shape[1] == 3:
projection = "ternary"
fig, ax = plt.subplots(1, subplot_kw=dict(projection=projection))
for nstd in np.arange(1, nstds + 1)[::-1]: # backwards for svg construction
# here we use the absolute eigenvalues
xsig, ysig = nstd * np.sqrt(np.abs(vals)) # n sigmas
ell = matplotlib.patches.Ellipse(
xy=mean.flatten(), width=2 * xsig, height=2 * ysig, angle=theta[:1]
)
points = interpolated_patch_path(ell, resolution=resolution).vertices
if callable(transform) and (transform is not None):
points = transform(points) # transform to compositional data
if points.shape[1] == 3:
                ax_transform = (ax.transData + ax.transTernaryAxes.inverted()).inverted()
                points = ax_transform.transform(points)  # transform to axes coords
patch = matplotlib.patches.PathPatch(matplotlib.path.Path(points), **kwargs)
patch.set_edgecolor("k")
patch.set_alpha(1.0 / nstd)
patch.set_linewidth(0.5)
ax.add_artist(patch)
return ax
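# Illustrative usage (added): standard-deviation ellipses around correlated
# bivariate normal samples on plain cartesian axes (no ternary transform).
def _example_plot_stdev_ellipses():
    comp = np.random.multivariate_normal([0, 0], [[1.0, 0.5], [0.5, 1.0]], size=200)
    return plot_stdev_ellipses(comp, nstds=3, facecolor="0.7")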
def plot_pca_vectors(comp, nstds=2, scale=100.0, transform=None, ax=None, **kwargs):
"""
Plot vectors corresponding to principal components and their magnitudes.
Parameters
-------------
comp : :class:`numpy.ndarray`
Composition to use.
nstds : :class:`int`
Multiplier for magnitude of individual principal component vectors.
scale : :class:`float`
        Scale applying to all x-y data points. For integration with python-ternary.
transform : :class:`callable`
Function for transformation of data prior to plotting (to either 2D or 3D).
ax : :class:`matplotlib.axes.Axes`
Axes to plot on.
Returns
-------
ax : :class:`matplotlib.axes.Axes`
Todo
-----
* Minor reimplementation of the sklearn PCA to avoid dependency.
https://en.wikipedia.org/wiki/Principal_component_analysis
"""
pca = PCA(n_components=2)
pca.fit(comp)
if ax is None:
fig, ax = plt.subplots(1)
for variance, vector in zip(pca.explained_variance_, pca.components_):
line = vector_to_line(pca.mean_, vector, variance, spans=nstds)
if callable(transform) and (transform is not None):
line = transform(line)
line *= scale
ax.plot(*line.T, **kwargs)
return ax
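# Illustrative usage (added; assumes scikit-learn is installed): overlay the
# principal-component vectors of a random bivariate dataset on its scatter plot.
def _example_plot_pca_vectors():
    comp = np.random.multivariate_normal([0, 0], [[2.0, 1.0], [1.0, 1.0]], size=300)
    fig, ax = plt.subplots(1)
    ax.scatter(*comp.T, s=5)
    return plot_pca_vectors(comp, nstds=2, scale=1.0, ax=ax, color="k")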
def plot_2dhull(data, ax=None, splines=False, s=0, **plotkwargs):
"""
Plots a 2D convex hull around an array of xy data points.
"""
if ax is None:
fig, ax = plt.subplots(1)
chull = scipy.spatial.ConvexHull(data, incremental=True)
x, y = data[chull.vertices].T
if not splines:
lines = ax.plot(np.append(x, [x[0]]), np.append(y, [y[0]]), **plotkwargs)
else:
# https://stackoverflow.com/questions/33962717/interpolating-a-closed-curve-using-scipy
tck, u = scipy.interpolate.splprep([x, y], per=True, s=s)
xi, yi = scipy.interpolate.splev(np.linspace(0, 1, 1000), tck)
lines = ax.plot(xi, yi, **plotkwargs)
return lines
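# Illustrative usage (added): draw the convex hull around a random point cloud.
def _example_plot_2dhull():
    data = np.random.rand(50, 2)
    fig, ax = plt.subplots(1)
    ax.scatter(*data.T, s=5)
    return plot_2dhull(data, ax=ax, color="k")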
def plot_cooccurence(arr, ax=None, normalize=True, log=False, colorbar=False, **kwargs):
"""
Plot the co-occurence frequency matrix for a given input.
Parameters
-----------
ax : :class:`matplotlib.axes.Axes`, :code:`None`
The subplot to draw on.
normalize : :class:`bool`
Whether to normalize the cooccurence to compare disparate variables.
log : :class:`bool`
Whether to take the log of the cooccurence.
colorbar : :class:`bool`
Whether to append a colorbar.
Returns
--------
:class:`matplotlib.axes.Axes`
Axes on which the cooccurence plot is added.
"""
arr = np.array(arr)
if ax is None:
fig, ax = plt.subplots(1, figsize=(4 + [0.0, 0.2][colorbar], 4))
co_occur = cooccurence_pattern(arr, normalize=normalize, log=log)
heatmap = ax.pcolor(co_occur, **kwargs)
ax.set_yticks(np.arange(co_occur.shape[0]) + 0.5, minor=False)
ax.set_xticks(np.arange(co_occur.shape[1]) + 0.5, minor=False)
ax.invert_yaxis()
ax.xaxis.tick_top()
if colorbar:
add_colorbar(heatmap, **kwargs)
return ax
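# Illustrative usage (added): co-occurrence frequencies for a small array with
# values knocked out at random to simulate missing data.
def _example_plot_cooccurence():
    arr = np.random.rand(20, 3)
    arr[arr < 0.2] = np.nan
    return plot_cooccurence(arr, normalize=True)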
def nan_scatter(xdata, ydata, ax=None, axes_width=0.2, **kwargs):
"""
    Scatter plot with additional marginal axes for points where one of the two
    coordinates is missing. Additional keyword arguments are passed to matplotlib.
Parameters
----------
xdata : :class:`numpy.ndarray`
X data
    ydata : :class:`numpy.ndarray` | :class:`pandas.Series`
Y data
ax : :class:`matplotlib.axes.Axes`
Axes on which to plot.
axes_width : :class:`float`
Width of the marginal axes.
Returns
-------
:class:`matplotlib.axes.Axes`
Axes on which the nan_scatter is plotted.
"""
if ax is None:
fig, ax = plt.subplots(1)
ax.scatter(xdata, ydata, **kwargs)
if hasattr(ax, "divider"): # Don't rebuild axes
div = ax.divider
nanaxx = div.nanaxx
nanaxy = div.nanaxy
else: # Build axes
nanaxx = subaxes(ax, side="bottom", width=axes_width)
nanaxx.invert_yaxis()
nanaxy = subaxes(ax, side="left", width=axes_width)
nanaxy.invert_xaxis()
ax.divider.nanaxx = nanaxx # assign for later use
ax.divider.nanaxy = nanaxy
nanxdata = xdata[(np.isnan(ydata) & np.isfinite(xdata))]
nanydata = ydata[(np.isnan(xdata) & np.isfinite(ydata))]
# yminmax = np.nanmin(ydata), np.nanmax(ydata)
no_ybins = 50
ybinwidth = (np.nanmax(ydata) - np.nanmin(ydata)) / no_ybins
ybins = np.linspace(np.nanmin(ydata), np.nanmax(ydata) + ybinwidth, no_ybins)
nanaxy.hist(nanydata, bins=ybins, orientation="horizontal", **kwargs)
nanaxy.scatter(
10 * np.ones_like(nanydata) + 5 * np.random.randn(len(nanydata)),
nanydata,
zorder=-1,
**kwargs
)
# xminmax = np.nanmin(xdata), np.nanmax(xdata)
no_xbins = 50
xbinwidth = (np.nanmax(xdata) - np.nanmin(xdata)) / no_xbins
xbins = np.linspace(np.nanmin(xdata), np.nanmax(xdata) + xbinwidth, no_xbins)
nanaxx.hist(nanxdata, bins=xbins, **kwargs)
nanaxx.scatter(
nanxdata,
        10 * np.ones_like(nanxdata) + 5 * np.random.randn(len(nanxdata)),
        zorder=-1,
        **kwargs
    )
    return ax
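# Illustrative usage (added): scatter two partially-missing variables; points
# with one missing coordinate appear in the marginal axes rather than vanishing.
def _example_nan_scatter():
    x, y = np.random.randn(200), np.random.randn(200)
    x[:20] = np.nan
    y[-20:] = np.nan
    return nan_scatter(x, y, s=5)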
from models.Synapses import Synapse
import numpy as np
class Connection:
def __init__(self, pre, post, weight_change=True):
self.pre = pre
self.post = post
self.synapses = []
self.weight_in_time = None
if weight_change:
self.weight_in_time = []
def add(self, pre_indices, post_indices, mu=0.5, sigma=0.01, **kwargs):
for i in pre_indices:
for j in post_indices:
syn = Synapse(self.pre.neurons[i], self.post.neurons[j],
np.random.normal(mu, sigma), **kwargs)
self.synapses.append(syn)
self.pre.neurons[i].target_synapses.append(syn)
return self
def apply(self, connection_type, mu=0.5, sigma=0.01, **kwargs):
if self.weight_in_time is not None:
            self.weight_in_time.append(np.zeros((self.pre.size, self.post.size)))
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
import torch
import torch.utils.data as data
import os, math
import sys
import os.path as osp
from os.path import *
import numpy as np
import numpy.random as npr
import cv2
import scipy.io
import copy
import glob
try:
import cPickle # Use cPickle on Python 2.7
except ImportError:
import pickle as cPickle
import datasets
from fcn.config import cfg
from utils.blob import pad_im, chromatic_transform, add_noise, add_noise_cuda
from transforms3d.quaternions import mat2quat, quat2mat
from utils.se3 import *
from utils.pose_error import *
from utils.cython_bbox import bbox_overlaps
class YCBVideo(data.Dataset, datasets.imdb):
def __init__(self, image_set, ycb_video_path = None):
self._name = 'ycb_video_' + image_set
self._image_set = image_set
self._ycb_video_path = self._get_default_path() if ycb_video_path is None \
else ycb_video_path
path = os.path.join(self._ycb_video_path, 'data')
if not os.path.exists(path):
path = os.path.join(self._ycb_video_path, 'YCB_Video_Dataset/YCB_Video_Dataset/YCB_Video_Dataset/data')
self._data_path = path
self._model_path = os.path.join(datasets.ROOT_DIR, 'data', 'models')
# define all the classes
self._classes_all = ('__background__', '002_master_chef_can', '003_cracker_box', '004_sugar_box', '005_tomato_soup_can', '006_mustard_bottle', \
'007_tuna_fish_can', '008_pudding_box', '009_gelatin_box', '010_potted_meat_can', '011_banana', '019_pitcher_base', \
'021_bleach_cleanser', '024_bowl', '025_mug', '035_power_drill', '036_wood_block', '037_scissors', '040_large_marker', \
'051_large_clamp', '052_extra_large_clamp', '061_foam_brick')
self._num_classes_all = len(self._classes_all)
self._class_colors_all = [(255, 255, 255), (255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255), (0, 255, 255), \
(128, 0, 0), (0, 128, 0), (0, 0, 128), (128, 128, 0), (128, 0, 128), (0, 128, 128), \
(64, 0, 0), (0, 64, 0), (0, 0, 64), (64, 64, 0), (64, 0, 64), (0, 64, 64),
(192, 0, 0), (0, 192, 0), (0, 0, 192)]
self._symmetry_all = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]).astype(np.float32)
self._extents_all = self._load_object_extents()
self._width = 640
self._height = 480
self._intrinsic_matrix = np.array([[1.066778e+03, 0.000000e+00, 3.129869e+02],
[0.000000e+00, 1.067487e+03, 2.413109e+02],
[0.000000e+00, 0.000000e+00, 1.000000e+00]])
# select a subset of classes
self._classes = [self._classes_all[i] for i in cfg.TRAIN.CLASSES]
self._classes_test = [self._classes_all[i] for i in cfg.TEST.CLASSES]
self._num_classes = len(self._classes)
self._class_colors = [self._class_colors_all[i] for i in cfg.TRAIN.CLASSES]
self._symmetry = self._symmetry_all[cfg.TRAIN.CLASSES]
self._symmetry_test = self._symmetry_all[cfg.TEST.CLASSES]
self._extents = self._extents_all[cfg.TRAIN.CLASSES]
self._extents_test = self._extents_all[cfg.TEST.CLASSES]
self._pixel_mean = cfg.PIXEL_MEANS / 255.0
# train classes
self._points, self._points_all, self._point_blob = \
self._load_object_points(self._classes, self._extents, self._symmetry)
# test classes
self._points_test, self._points_all_test, self._point_blob_test = \
self._load_object_points(self._classes_test, self._extents_test, self._symmetry_test)
# 3D model paths
self.model_mesh_paths = ['{}/{}/textured_simple.obj'.format(self._model_path, cls) for cls in self._classes_all[1:]]
self.model_sdf_paths = ['{}/{}/textured_simple_low_res.pth'.format(self._model_path, cls) for cls in self._classes_all[1:]]
self.model_texture_paths = ['{}/{}/texture_map.png'.format(self._model_path, cls) for cls in self._classes_all[1:]]
self.model_colors = [np.array(self._class_colors_all[i]) / 255.0 for i in range(1, len(self._classes_all))]
self.model_mesh_paths_target = ['{}/{}/textured_simple.obj'.format(self._model_path, cls) for cls in self._classes[1:]]
self.model_sdf_paths_target = ['{}/{}/textured_simple.sdf'.format(self._model_path, cls) for cls in self._classes[1:]]
self.model_texture_paths_target = ['{}/{}/texture_map.png'.format(self._model_path, cls) for cls in self._classes[1:]]
self.model_colors_target = [np.array(self._class_colors_all[i]) / 255.0 for i in cfg.TRAIN.CLASSES[1:]]
self._class_to_ind = dict(zip(self._classes, range(self._num_classes)))
self._image_index = self._load_image_set_index(image_set)
self._size = len(self._image_index)
if self._size > cfg.TRAIN.MAX_ITERS_PER_EPOCH * cfg.TRAIN.IMS_PER_BATCH:
self._size = cfg.TRAIN.MAX_ITERS_PER_EPOCH * cfg.TRAIN.IMS_PER_BATCH
self._roidb = self.gt_roidb()
assert os.path.exists(self._ycb_video_path), \
'ycb_video path does not exist: {}'.format(self._ycb_video_path)
assert os.path.exists(self._data_path), \
'Data path does not exist: {}'.format(self._data_path)
def __getitem__(self, index):
is_syn = 0
roidb = self._roidb[index]
# Get the input image blob
random_scale_ind = npr.randint(0, high=len(cfg.TRAIN.SCALES_BASE))
im_blob, im_depth, im_scale, height, width = self._get_image_blob(roidb, random_scale_ind)
# build the label blob
label_blob, mask, meta_data_blob, pose_blob, gt_boxes, vertex_targets, vertex_weights \
= self._get_label_blob(roidb, self._num_classes, im_scale, height, width)
is_syn = roidb['is_syn']
im_info = np.array([im_blob.shape[1], im_blob.shape[2], im_scale, is_syn], dtype=np.float32)
sample = {'image_color': im_blob,
'im_depth': im_depth,
'label': label_blob,
'mask': mask,
'meta_data': meta_data_blob,
'poses': pose_blob,
'extents': self._extents,
'points': self._point_blob,
'symmetry': self._symmetry,
'gt_boxes': gt_boxes,
'im_info': im_info,
'video_id': roidb['video_id'],
'image_id': roidb['image_id']}
if cfg.TRAIN.VERTEX_REG:
sample['vertex_targets'] = vertex_targets
sample['vertex_weights'] = vertex_weights
return sample
def _get_image_blob(self, roidb, scale_ind):
# rgba
rgba = pad_im(cv2.imread(roidb['image'], cv2.IMREAD_UNCHANGED), 16)
if rgba.shape[2] == 4:
im = np.copy(rgba[:,:,:3])
alpha = rgba[:,:,3]
I = np.where(alpha == 0)
im[I[0], I[1], :] = 0
else:
im = rgba
im_scale = cfg.TRAIN.SCALES_BASE[scale_ind]
if im_scale != 1.0:
im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
height = im.shape[0]
width = im.shape[1]
if roidb['flipped']:
im = im[:, ::-1, :]
# chromatic transform
if cfg.TRAIN.CHROMATIC and cfg.MODE == 'TRAIN' and np.random.rand(1) > 0.1:
im = chromatic_transform(im)
if cfg.TRAIN.ADD_NOISE and cfg.MODE == 'TRAIN' and np.random.rand(1) > 0.1:
im = add_noise(im)
im_tensor = torch.from_numpy(im) / 255.0
im_tensor -= self._pixel_mean
image_blob = im_tensor.permute(2, 0, 1).float()
# depth image
im_depth = pad_im(cv2.imread(roidb['depth'], cv2.IMREAD_UNCHANGED), 16)
if im_scale != 1.0:
im_depth = cv2.resize(im_depth, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_NEAREST)
im_depth = im_depth.astype('float') / 10000.0
return image_blob, im_depth, im_scale, height, width
def _get_label_blob(self, roidb, num_classes, im_scale, height, width):
""" build the label blob """
meta_data = scipy.io.loadmat(roidb['meta_data'])
meta_data['cls_indexes'] = meta_data['cls_indexes'].flatten()
classes = np.array(cfg.TRAIN.CLASSES)
# read label image
im_label = pad_im(cv2.imread(roidb['label'], cv2.IMREAD_UNCHANGED), 16)
if roidb['flipped']:
if len(im_label.shape) == 2:
im_label = im_label[:, ::-1]
else:
im_label = im_label[:, ::-1, :]
if im_scale != 1.0:
im_label = cv2.resize(im_label, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_NEAREST)
label_blob = np.zeros((num_classes, height, width), dtype=np.float32)
label_blob[0, :, :] = 1.0
for i in range(1, num_classes):
I = np.where(im_label == classes[i])
if len(I[0]) > 0:
label_blob[i, I[0], I[1]] = 1.0
label_blob[0, I[0], I[1]] = 0.0
# foreground mask
seg = torch.from_numpy((im_label != 0).astype(np.float32))
mask = seg.unsqueeze(0).repeat((3, 1, 1)).float()
# poses
poses = meta_data['poses']
if len(poses.shape) == 2:
poses = np.reshape(poses, (3, 4, 1))
if roidb['flipped']:
poses = _flip_poses(poses, meta_data['intrinsic_matrix'], width)
num = poses.shape[2]
pose_blob = np.zeros((num_classes, 9), dtype=np.float32)
gt_boxes = np.zeros((num_classes, 5), dtype=np.float32)
count = 0
for i in range(num):
cls = int(meta_data['cls_indexes'][i])
ind = np.where(classes == cls)[0]
if len(ind) > 0:
R = poses[:, :3, i]
T = poses[:, 3, i]
pose_blob[count, 0] = 1
pose_blob[count, 1] = ind
qt = mat2quat(R)
# egocentric to allocentric
qt_allocentric = egocentric2allocentric(qt, T)
if qt_allocentric[0] < 0:
qt_allocentric = -1 * qt_allocentric
pose_blob[count, 2:6] = qt_allocentric
pose_blob[count, 6:] = T
# compute box
x3d = np.ones((4, self._points_all.shape[1]), dtype=np.float32)
x3d[0, :] = self._points_all[ind,:,0]
x3d[1, :] = self._points_all[ind,:,1]
x3d[2, :] = self._points_all[ind,:,2]
                RT = np.zeros((3, 4), dtype=np.float32)
"""
Many of these tests use the minimal test/data/gdc.bed file which has just
enough complexity to be useful in testing corner cases. When reading through
the tests, it's useful to have that file open to understand what's happening.
"""
import os
import metaseq
import multiprocessing
from metaseq.array_helpers import ArgumentError
import numpy as np
from nose.tools import assert_raises
from nose.plugins.skip import SkipTest
nan = np.nan
inf = np.inf
gs = {}
for kind in ['bed', 'bam', 'bigbed', 'bigwig']:
gs[kind] = metaseq.genomic_signal(metaseq.example_filename('gdc.%s' % kind), kind)
PROCESSES = int(os.environ.get("METASEQ_PROCESSES", multiprocessing.cpu_count()))
def test_tointerval():
assert metaseq.helpers.tointerval("chr2L:1-10[-]").strand == '-'
assert metaseq.helpers.tointerval("chr2L:1-10[+]").strand == '+'
assert metaseq.helpers.tointerval("chr2L:1-10").strand == '.'
def test_local_count():
def check(kind, coord, expected, stranded):
try:
result = gs[kind].local_count(coord, stranded=stranded)
except NotImplementedError:
raise SkipTest("Incompatible bx-python version for bigBed")
assert result == expected, (kind, coord, result)
for kind in ['bam', 'bigbed', 'bed']:
for coord, expected, stranded in (
('chr2L:1-80', 3, False), # easy case
('chr2L:1000-3000', 0, False), # above upper boundary
('chr2L:1-9', 0, False), # below lower boundary
('chr2L:71-73[-]', 2, False), # unstranded = 2
('chr2L:71-73[-]', 1, True), # stranded = 1
('chr2L:70-71', 2, False), # pathological corner case
# ('chr2L:75-76', 0, False), # pathological corner case
):
yield check, kind, coord, expected, stranded
def test_local_coverage_stranded():
def check(kind, coord, expected):
try:
result = gs[kind].local_coverage(coord)
except NotImplementedError:
raise SkipTest("Incompatible bx-python version for bigBed")
assert np.all(result[0] == expected[0]) and np.all(result[1] == expected[1]), (kind, coord, result)
for kind in ['bam', 'bigbed', 'bed', 'bigwig']:
for coord, expected in (
('chr2L:1-20[-]',
(
np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]),
np.array([0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0.])[::-1],
# note reverse------------------------------------------------------------------------^^^^^^
),
),
('chr2L:68-76[-]',
(
np.array([68, 69, 70, 71, 72, 73, 74, 75]),
np.array([0., 0., 2., 2., 2., 2., 2., 0.])[::-1],
# note reverse----------------------------^^^^^^
),
),
):
yield check, kind, coord, expected
def test_local_coverage_shifted():
def check(kind, coord, shift_width, expected):
try:
result = gs[kind].local_coverage(coord, shift_width=shift_width)
except NotImplementedError:
raise SkipTest("Incompatible bx-python version for bigBed")
assert np.all(result[0] == expected[0]) and np.all(result[1] == expected[1]), (kind, coord, result)
for kind in ['bam', 'bigbed', 'bed']:
for coord, shift_width, expected in (
('chr2L:1-20', -2,
(
np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]),
np.array([0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0.]),
),
),
# this one is complex, because the minus-strand read shifts left,
# and the plus-strand shifts right.
('chr2L:68-76', 1,
(
np.array([68, 69, 70, 71, 72, 73, 74, 75]),
np.array([0., 1., 1., 2., 2., 2., 1., 1.]),
),
),
# shift the reads all the way out of the window...
('chr2L:68-76', 10,
(
np.array([68, 69, 70, 71, 72, 73, 74, 75]),
np.array([0., 0., 0., 0., 0., 0., 0., 0.]),
),
),
):
yield check, kind, coord, shift_width, expected
def test_local_coverage_read_strand():
"""
checks stranded full binning
excludes bigwig since strand doesn't make sense for that format.
"""
def check(kind, coord, read_strand, expected):
try:
result = gs[kind].local_coverage(coord, read_strand=read_strand)
except NotImplementedError:
raise SkipTest("Incompatible bx-python version for bigBed")
assert np.all(result[0] == expected[0]) and np.all(result[1] == expected[1]), (kind, coord, result)
for kind in ['bam', 'bigbed', 'bed']:
for coord, read_strand, expected in (
('chr2L:1-20', '+',
(
np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]),
np.array([0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0.]),
),
),
('chr2L:1-20', '-',
(
np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]),
np.array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]),
),
),
):
yield check, kind, coord, read_strand, expected
def test_local_coverage_fragment_size():
def check(kind, coord, fragment_size, expected):
try:
result = gs[kind].local_coverage(coord, fragment_size=fragment_size)
except NotImplementedError:
raise SkipTest("Incompatible bx-python version for bigBed")
assert np.all(result[0] == expected[0]) and np.all(result[1] == expected[1]), (kind, coord, result)
for kind in ['bam', 'bigbed', 'bed']:
for coord, fragment_size, expected in (
('chr2L:1-20', 7,
(
np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]),
np.array([0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0.]),
),
),
('chr2L:68-76', 6,
(
np.array([68, 69, 70, 71, 72, 73, 74, 75]),
np.array([0., 1., 2., 2., 2., 2., 2., 1.]),
),
),
('chr2L:68-76', 1,
(
np.array([68, 69, 70, 71, 72, 73, 74, 75]),
np.array([0., 0., 1., 0., 0., 0., 1., 0.]),
),
),
):
yield check, kind, coord, fragment_size, expected
def test_local_coverage_score():
def check(kind, coord, expected):
try:
result = gs[kind].local_coverage(coord, use_score=True)
except NotImplementedError:
raise SkipTest("Incompatible bx-python version for bigBed")
assert np.all(result[0] == expected[0]) and np.all(result[1] == expected[1]), (kind, coord, result)
for kind in ['bigbed', 'bed']:
for coord, expected in (
('chr2L:1-20',
(
np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]),
np.array([0., 0., 0., 0., 0., 0., 0., 0., 0., 255., 255., 255., 255., 255., 0., 0., 0., 0., 0.]),
),
),
('chr2L:68-76',
(
np.array([68, 69, 70, 71, 72, 73, 74, 75]),
np.array([0., 0., 510., 510., 510., 510., 510., 0.]),
),
),
):
yield check, kind, coord, expected
def test_local_coverage_full():
"""generator of tests for local coverage
ensures that all formats are consistent in their results when retrieving
the full un-binned data.
"""
def check(kind, coord, processes, expected):
try:
result = gs[kind].local_coverage(coord, processes=processes)
except NotImplementedError:
raise SkipTest("Incompatible bx-python version for bigBed")
assert np.all(result[0] == expected[0]) and np.all(result[1] == expected[1]), (kind, coord, result)
for kind in ['bam', 'bigbed', 'bed', 'bigwig']:
for coord, expected in (
('chr2L:1-20',
(
np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]),
np.array([0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0.]),
),
),
('chr2L:68-76',
(
np.array([68, 69, 70, 71, 72, 73, 74, 75]),
np.array([0., 0., 2., 2., 2., 2., 2., 0.]),
),
),
('chr2L:568-576',
(
np.array([568, 569, 570, 571, 572, 573, 574, 575]),
np.array([0., 0., 0., 0., 0., 0., 0., 0.]),
),
),
):
for processes in [None, PROCESSES]:
yield check, kind, coord, processes, expected
def test_local_coverage_binned():
"""generator of tests for local coverage
ensures that all formats are consistent in their results when retrieving
binned data.
"""
def check(kind, coord, processes, expected):
if kind == 'bigwig':
result = gs[kind].local_coverage(coord, bins=8, method='get_as_array', processes=processes)
else:
try:
result = gs[kind].local_coverage(coord, bins=8, processes=processes)
except NotImplementedError:
raise SkipTest("Incompatible bx-python version for bigBed")
try:
assert np.allclose(result[0], expected[0]) and np.allclose(result[1], expected[1])
except:
print (kind, coord, result, expected)
raise
for kind in ['bam', 'bigbed', 'bed', 'bigwig']:
for coord, expected in (
('chr2L:1-20',
(
np.array([ 1., 3.57142857, 6.14285714, 8.71428571, 11.28571429, 13.85714286, 16.42857143, 19.]),
np.array([ 0., 0., 0., 0., 1., 1., 0., 0. ]),
),
),
('chr2L:68-76',
(
np.array([68, 69, 70, 71, 72, 73, 74, 75]),
np.array([0., 0., 2., 2., 2., 2., 2., 0.]),
),
),
):
for processes in [None, PROCESSES]:
yield check, kind, coord, processes, expected
def test_array_binned():
def check(kind, coord, processes, expected):
if kind == 'bigwig':
result = gs[kind].array(coord, bins=8, method='get_as_array', processes=processes)
else:
try:
result = gs[kind].array(coord, bins=8, processes=processes)
except NotImplementedError:
raise SkipTest("Incompatible bx-python version for bigBed")
try:
assert np.allclose(result, expected)
except:
print (kind, coord, result, expected)
raise
for kind in ['bam', 'bigbed', 'bed', 'bigwig']:
for coord, expected in (
(['chr2L:1-20'],
np.array([[0., 0., 0., 0., 1., 1., 0., 0. ]]),
),
(['chr2L:1-20', 'chr2L:1-20[-]'],
np.array([[0., 0., 0., 0., 1., 1., 0., 0. ],
[0., 0., 1., 1., 0., 0., 0., 0. ]]),
),
(['chr2L:68-76'],
np.array([[0., 0., 2., 2., 2., 2., 2., 0.]]),
),
):
for processes in [None, PROCESSES]:
yield check, kind, coord, processes, expected
def test_array_binned_preserve_total():
def check(kind, coord, processes, expected):
kwargs = dict(features=coord, bins=8, processes=processes, preserve_total=True)
if kind == 'bigwig':
assert_raises(ArgumentError, gs[kind].array, method='get_as_array', **kwargs)
return
else:
try:
result = gs[kind].array(**kwargs)
except NotImplementedError:
raise SkipTest("Incompatible bx-python version for bigBed")
try:
assert np.allclose(result, expected)
except:
print (kind, coord, result, expected)
raise
for kind in ['bam', 'bigbed', 'bed', 'bigwig']:
for coord, expected in (
(['chr2L:1-20'],
np.array([[0., 0., 0., 0., .5, .5, 0., 0. ]]),
),
(['chr2L:1-20', 'chr2L:1-20[-]'],
np.array([[0., 0., 0., 0., .5, .5, 0., 0. ],
[0., 0., .5, .5, 0., 0., 0., 0. ]]),
),
(['chr2L:68-76'],
             np.array([[0., 0., .4, .4, .4, .4, .4, 0.]]),